id stringlengths 25 30 | content stringlengths 14 942k | max_stars_repo_path stringlengths 49 55 |
|---|---|---|
crossvul-cpp_data_bad_3927_3 | /*!
* \file soft-se.c
*
* \brief Secure Element software implementation
*
* \copyright Revised BSD License, see section \ref LICENSE.
*
* \code
* ______ _
* / _____) _ | |
* ( (____ _____ ____ _| |_ _____ ____| |__
* \____ \| ___ | (_ _) ___ |/ ___) _ \
* _____) ) ____| | | || |_| ____( (___| | | |
* (______/|_____)_|_|_| \__)_____)\____)_| |_|
* (C)2020 Semtech
*
* ___ _____ _ ___ _ _____ ___ ___ ___ ___
* / __|_ _/_\ / __| |/ / __/ _ \| _ \/ __| __|
* \__ \ | |/ _ \ (__| ' <| _| (_) | / (__| _|
* |___/ |_/_/ \_\___|_|\_\_| \___/|_|_\\___|___|
* embedded.connectivity.solutions===============
*
* \endcode
*
*/
#include <stdlib.h>
#include <stdint.h>
#include "utilities.h"
#include "aes.h"
#include "cmac.h"
#include "LoRaMacHeaderTypes.h"
#include "secure-element.h"
#include "se-identity.h"
#include "soft-se-hal.h"
/*!
* Number of supported crypto keys
*/
#define NUM_OF_KEYS 23
/*!
* Identifier value pair type for Keys
*/
typedef struct sKey
{
/*
* Key identifier
*/
KeyIdentifier_t KeyID;
/*
* Key value
*/
uint8_t KeyValue[SE_KEY_SIZE];
} Key_t;
/*
* Secure Element Non Volatile Context structure
*/
typedef struct sSecureElementNvCtx
{
/*
* DevEUI storage
*/
uint8_t DevEui[SE_EUI_SIZE];
/*
* Join EUI storage
*/
uint8_t JoinEui[SE_EUI_SIZE];
/*
* Pin storage
*/
uint8_t Pin[SE_PIN_SIZE];
/*
* AES computation context variable
*/
aes_context AesContext;
/*
* CMAC computation context variable
*/
AES_CMAC_CTX AesCmacCtx[1];
/*
* Key List
*/
Key_t KeyList[NUM_OF_KEYS];
} SecureElementNvCtx_t;
/*!
* Secure element context
*/
static SecureElementNvCtx_t SeNvmCtx = {
/*!
* end-device IEEE EUI (big endian)
*
* \remark In this application the value is automatically generated by calling
* BoardGetUniqueId function
*/
.DevEui = LORAWAN_DEVICE_EUI,
/*!
* App/Join server IEEE EUI (big endian)
*/
.JoinEui = LORAWAN_JOIN_EUI,
/*!
* Secure-element pin (big endian)
*/
.Pin = SECURE_ELEMENT_PIN,
/*!
* LoRaWAN key list
*/
.KeyList = SOFT_SE_KEY_LIST
};
static SecureElementNvmEvent SeNvmCtxChanged;
/*
* Local functions
*/
/*
* Gets key item from key list.
*
* \param[IN] keyID - Key identifier
* \param[OUT] keyItem - Key item reference
* \retval - Status of the operation
*/
static SecureElementStatus_t GetKeyByID( KeyIdentifier_t keyID, Key_t** keyItem )
{
for( uint8_t i = 0; i < NUM_OF_KEYS; i++ )
{
if( SeNvmCtx.KeyList[i].KeyID == keyID )
{
*keyItem = &( SeNvmCtx.KeyList[i] );
return SECURE_ELEMENT_SUCCESS;
}
}
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
/*
* Dummy callback in case if the user provides NULL function pointer
*/
static void DummyCB( void )
{
return;
}
/*
* Computes a CMAC of a message using provided initial Bx block
*
* cmac = aes128_cmac(keyID, blocks[i].Buffer)
*
* \param[IN] micBxBuffer - Buffer containing the initial Bx block
* \param[IN] buffer - Data buffer
* \param[IN] size - Data buffer size
* \param[IN] keyID - Key identifier to determine the AES key to be used
* \param[OUT] cmac - Computed cmac
* \retval - Status of the operation
*/
static SecureElementStatus_t ComputeCmac( uint8_t* micBxBuffer, uint8_t* buffer, uint16_t size, KeyIdentifier_t keyID,
uint32_t* cmac )
{
if( ( buffer == NULL ) || ( cmac == NULL ) )
{
return SECURE_ELEMENT_ERROR_NPE;
}
uint8_t Cmac[16];
AES_CMAC_Init( SeNvmCtx.AesCmacCtx );
Key_t* keyItem;
SecureElementStatus_t retval = GetKeyByID( keyID, &keyItem );
if( retval == SECURE_ELEMENT_SUCCESS )
{
AES_CMAC_SetKey( SeNvmCtx.AesCmacCtx, keyItem->KeyValue );
if( micBxBuffer != NULL )
{
AES_CMAC_Update( SeNvmCtx.AesCmacCtx, micBxBuffer, 16 );
}
AES_CMAC_Update( SeNvmCtx.AesCmacCtx, buffer, size );
AES_CMAC_Final( Cmac, SeNvmCtx.AesCmacCtx );
// Bring into the required format
*cmac = ( uint32_t )( ( uint32_t ) Cmac[3] << 24 | ( uint32_t ) Cmac[2] << 16 | ( uint32_t ) Cmac[1] << 8 |
( uint32_t ) Cmac[0] );
}
return retval;
}
/*
* API functions
*/
SecureElementStatus_t SecureElementInit( SecureElementNvmEvent seNvmCtxChanged )
{
// Assign callback
if( seNvmCtxChanged != 0 )
{
SeNvmCtxChanged = seNvmCtxChanged;
}
else
{
SeNvmCtxChanged = DummyCB;
}
#if !defined( SECURE_ELEMENT_PRE_PROVISIONED )
#if( STATIC_DEVICE_EUI == 0 )
// Get a DevEUI from MCU unique ID
SoftSeHalGetUniqueId( SeNvmCtx.DevEui );
#endif
#endif
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementRestoreNvmCtx( void* seNvmCtx )
{
// Restore nvm context
if( seNvmCtx != 0 )
{
memcpy1( ( uint8_t* ) &SeNvmCtx, ( uint8_t* ) seNvmCtx, sizeof( SeNvmCtx ) );
return SECURE_ELEMENT_SUCCESS;
}
else
{
return SECURE_ELEMENT_ERROR_NPE;
}
}
void* SecureElementGetNvmCtx( size_t* seNvmCtxSize )
{
*seNvmCtxSize = sizeof( SeNvmCtx );
return &SeNvmCtx;
}
SecureElementStatus_t SecureElementSetKey( KeyIdentifier_t keyID, uint8_t* key )
{
if( key == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
for( uint8_t i = 0; i < NUM_OF_KEYS; i++ )
{
if( SeNvmCtx.KeyList[i].KeyID == keyID )
{
if( ( keyID == MC_KEY_0 ) || ( keyID == MC_KEY_1 ) || ( keyID == MC_KEY_2 ) || ( keyID == MC_KEY_3 ) )
{ // Decrypt the key if its a Mckey
SecureElementStatus_t retval = SECURE_ELEMENT_ERROR;
uint8_t decryptedKey[16] = { 0 };
retval = SecureElementAesEncrypt( key, 16, MC_KE_KEY, decryptedKey );
memcpy1( SeNvmCtx.KeyList[i].KeyValue, decryptedKey, SE_KEY_SIZE );
SeNvmCtxChanged( );
return retval;
}
else
{
memcpy1( SeNvmCtx.KeyList[i].KeyValue, key, SE_KEY_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
}
}
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
SecureElementStatus_t SecureElementComputeAesCmac( uint8_t* micBxBuffer, uint8_t* buffer, uint16_t size,
KeyIdentifier_t keyID, uint32_t* cmac )
{
if( keyID >= LORAMAC_CRYPTO_MULTICAST_KEYS )
{
// Never accept multicast key identifier for cmac computation
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
return ComputeCmac( micBxBuffer, buffer, size, keyID, cmac );
}
SecureElementStatus_t SecureElementVerifyAesCmac( uint8_t* buffer, uint16_t size, uint32_t expectedCmac,
KeyIdentifier_t keyID )
{
if( buffer == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
SecureElementStatus_t retval = SECURE_ELEMENT_ERROR;
uint32_t compCmac = 0;
retval = ComputeCmac( NULL, buffer, size, keyID, &compCmac );
if( retval != SECURE_ELEMENT_SUCCESS )
{
return retval;
}
if( expectedCmac != compCmac )
{
retval = SECURE_ELEMENT_FAIL_CMAC;
}
return retval;
}
SecureElementStatus_t SecureElementAesEncrypt( uint8_t* buffer, uint16_t size, KeyIdentifier_t keyID,
uint8_t* encBuffer )
{
if( buffer == NULL || encBuffer == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Check if the size is divisible by 16,
if( ( size % 16 ) != 0 )
{
return SECURE_ELEMENT_ERROR_BUF_SIZE;
}
memset1( SeNvmCtx.AesContext.ksch, '\0', 240 );
Key_t* pItem;
SecureElementStatus_t retval = GetKeyByID( keyID, &pItem );
if( retval == SECURE_ELEMENT_SUCCESS )
{
aes_set_key( pItem->KeyValue, 16, &SeNvmCtx.AesContext );
uint8_t block = 0;
while( size != 0 )
{
aes_encrypt( &buffer[block], &encBuffer[block], &SeNvmCtx.AesContext );
block = block + 16;
size = size - 16;
}
}
return retval;
}
SecureElementStatus_t SecureElementDeriveAndStoreKey( Version_t version, uint8_t* input, KeyIdentifier_t rootKeyID,
KeyIdentifier_t targetKeyID )
{
if( input == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
SecureElementStatus_t retval = SECURE_ELEMENT_ERROR;
uint8_t key[16] = { 0 };
// In case of MC_KE_KEY, only McRootKey can be used as root key
if( targetKeyID == MC_KE_KEY )
{
if( rootKeyID != MC_ROOT_KEY )
{
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
}
// Derive key
retval = SecureElementAesEncrypt( input, 16, rootKeyID, key );
if( retval != SECURE_ELEMENT_SUCCESS )
{
return retval;
}
// Store key
retval = SecureElementSetKey( targetKeyID, key );
if( retval != SECURE_ELEMENT_SUCCESS )
{
return retval;
}
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementProcessJoinAccept( JoinReqIdentifier_t joinReqType, uint8_t* joinEui,
uint16_t devNonce, uint8_t* encJoinAccept,
uint8_t encJoinAcceptSize, uint8_t* decJoinAccept,
uint8_t* versionMinor )
{
if( ( encJoinAccept == NULL ) || ( decJoinAccept == NULL ) || ( versionMinor == NULL ) )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Determine decryption key
KeyIdentifier_t encKeyID = NWK_KEY;
if( joinReqType != JOIN_REQ )
{
encKeyID = J_S_ENC_KEY;
}
memcpy1( decJoinAccept, encJoinAccept, encJoinAcceptSize );
// Decrypt JoinAccept, skip MHDR
if( SecureElementAesEncrypt( encJoinAccept + LORAMAC_MHDR_FIELD_SIZE, encJoinAcceptSize - LORAMAC_MHDR_FIELD_SIZE,
encKeyID, decJoinAccept + LORAMAC_MHDR_FIELD_SIZE ) != SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_FAIL_ENCRYPT;
}
*versionMinor = ( ( decJoinAccept[11] & 0x80 ) == 0x80 ) ? 1 : 0;
uint32_t mic = 0;
mic = ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE] << 0 );
mic |= ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE + 1] << 8 );
mic |= ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE + 2] << 16 );
mic |= ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE + 3] << 24 );
// - Header buffer to be used for MIC computation
// - LoRaWAN 1.0.x : micHeader = [MHDR(1)]
// - LoRaWAN 1.1.x : micHeader = [JoinReqType(1), JoinEUI(8), DevNonce(2), MHDR(1)]
// Verify mic
if( *versionMinor == 0 )
{
// For LoRaWAN 1.0.x
// cmac = aes128_cmac(NwkKey, MHDR | JoinNonce | NetID | DevAddr | DLSettings | RxDelay | CFList |
// CFListType)
if( SecureElementVerifyAesCmac( decJoinAccept, ( encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE ), mic, NWK_KEY ) !=
SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_FAIL_CMAC;
}
}
#if( USE_LRWAN_1_1_X_CRYPTO == 1 )
else if( *versionMinor == 1 )
{
uint8_t micHeader11[JOIN_ACCEPT_MIC_COMPUTATION_OFFSET] = { 0 };
uint16_t bufItr = 0;
micHeader11[bufItr++] = ( uint8_t ) joinReqType;
memcpyr( micHeader11 + bufItr, joinEui, LORAMAC_JOIN_EUI_FIELD_SIZE );
bufItr += LORAMAC_JOIN_EUI_FIELD_SIZE;
micHeader11[bufItr++] = devNonce & 0xFF;
micHeader11[bufItr++] = ( devNonce >> 8 ) & 0xFF;
// For LoRaWAN 1.1.x and later:
// cmac = aes128_cmac(JSIntKey, JoinReqType | JoinEUI | DevNonce | MHDR | JoinNonce | NetID | DevAddr |
// DLSettings | RxDelay | CFList | CFListType)
// Prepare the msg for integrity check (adding JoinReqType, JoinEUI and DevNonce)
uint8_t localBuffer[LORAMAC_JOIN_ACCEPT_FRAME_MAX_SIZE + JOIN_ACCEPT_MIC_COMPUTATION_OFFSET] = { 0 };
memcpy1( localBuffer, micHeader11, JOIN_ACCEPT_MIC_COMPUTATION_OFFSET );
memcpy1( localBuffer + JOIN_ACCEPT_MIC_COMPUTATION_OFFSET - 1, decJoinAccept, encJoinAcceptSize );
if( SecureElementVerifyAesCmac( localBuffer,
encJoinAcceptSize + JOIN_ACCEPT_MIC_COMPUTATION_OFFSET -
LORAMAC_MHDR_FIELD_SIZE - LORAMAC_MIC_FIELD_SIZE,
mic, J_S_INT_KEY ) != SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_FAIL_CMAC;
}
}
#endif
else
{
return SECURE_ELEMENT_ERROR_INVALID_LORAWAM_SPEC_VERSION;
}
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementRandomNumber( uint32_t* randomNum )
{
if( randomNum == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
*randomNum = SoftSeHalGetRandomNumber( );
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementSetDevEui( uint8_t* devEui )
{
if( devEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.DevEui, devEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetDevEui( void )
{
return SeNvmCtx.DevEui;
}
SecureElementStatus_t SecureElementSetJoinEui( uint8_t* joinEui )
{
if( joinEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.JoinEui, joinEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetJoinEui( void )
{
return SeNvmCtx.JoinEui;
}
SecureElementStatus_t SecureElementSetPin( uint8_t* pin )
{
if( pin == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.Pin, pin, SE_PIN_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetPin( void )
{
return SeNvmCtx.Pin;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3927_3 |
crossvul-cpp_data_good_3888_0 | /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* memcached - memory caching daemon
*
* https://www.memcached.org/
*
* Copyright 2003 Danga Interactive, Inc. All rights reserved.
*
* Use and distribution licensed under the BSD license. See
* the LICENSE file for full text.
*
* Authors:
* Anatoly Vorobey <mellon@pobox.com>
* Brad Fitzpatrick <brad@danga.com>
*/
#include "memcached.h"
#ifdef EXTSTORE
#include "storage.h"
#endif
#include "authfile.h"
#include "restart.h"
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/uio.h>
#include <ctype.h>
#include <stdarg.h>
/* some POSIX systems need the following definition
* to get mlockall flags out of sys/mman.h. */
#ifndef _P1003_1B_VISIBLE
#define _P1003_1B_VISIBLE
#endif
#include <pwd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <sysexits.h>
#include <stddef.h>
#ifdef HAVE_GETOPT_LONG
#include <getopt.h>
#endif
#ifdef TLS
#include "tls.h"
#endif
#if defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
/*
* forward declarations
*/
static void drive_machine(conn *c);
static int new_socket(struct addrinfo *ai);
static ssize_t tcp_read(conn *arg, void *buf, size_t count);
static ssize_t tcp_sendmsg(conn *arg, struct msghdr *msg, int flags);
static ssize_t tcp_write(conn *arg, void *buf, size_t count);
enum try_read_result {
READ_DATA_RECEIVED,
READ_NO_DATA_RECEIVED,
READ_ERROR, /** an error occurred (on the socket) (or client closed connection) */
READ_MEMORY_ERROR /** failed to allocate more memory */
};
static int try_read_command_negotiate(conn *c);
static int try_read_command_udp(conn *c);
static int try_read_command_binary(conn *c);
static int try_read_command_ascii(conn *c);
static int try_read_command_asciiauth(conn *c);
static enum try_read_result try_read_network(conn *c);
static enum try_read_result try_read_udp(conn *c);
static void conn_set_state(conn *c, enum conn_states state);
static int start_conn_timeout_thread();
static mc_resp* resp_finish(conn *c, mc_resp *resp);
/* stats */
static void stats_init(void);
static void server_stats(ADD_STAT add_stats, conn *c);
static void process_stat_settings(ADD_STAT add_stats, void *c);
static void conn_to_str(const conn *c, char *addr, char *svr_addr);
/** Return a datum for stats in binary protocol */
static bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c);
/* defaults */
static void settings_init(void);
/* event handling, network IO */
static void event_handler(const int fd, const short which, void *arg);
static void conn_close(conn *c);
static void conn_init(void);
static bool update_event(conn *c, const int new_flags);
static void complete_nread(conn *c);
static void process_command(conn *c, char *command);
static void write_and_free(conn *c, char *buf, int bytes);
static void write_bin_error(conn *c, protocol_binary_response_status err,
const char *errstr, int swallow);
static void write_bin_miss_response(conn *c, char *key, size_t nkey);
#ifdef EXTSTORE
static void _get_extstore_cb(void *e, obj_io *io, int ret);
static inline int _get_extstore(conn *c, item *it, mc_resp *resp);
#endif
static void conn_free(conn *c);
/** binprot handlers **/
static void process_bin_flush(conn *c, char *extbuf);
static void process_bin_append_prepend(conn *c);
static void process_bin_update(conn *c, char *extbuf);
static void process_bin_get_or_touch(conn *c, char *extbuf);
static void process_bin_delete(conn *c);
static void complete_incr_bin(conn *c, char *extbuf);
static void process_bin_stat(conn *c);
static void process_bin_sasl_auth(conn *c);
/** exported globals **/
struct stats stats;
struct stats_state stats_state;
struct settings settings;
time_t process_started; /* when the process was started */
conn **conns;
struct slab_rebalance slab_rebal;
volatile int slab_rebalance_signal;
#ifdef EXTSTORE
/* hoping this is temporary; I'd prefer to cut globals, but will complete this
* battle another day.
*/
void *ext_storage = NULL;
#endif
/** file scope variables **/
static conn *listen_conn = NULL;
static int max_fds;
static struct event_base *main_base;
enum transmit_result {
TRANSMIT_COMPLETE, /** All done writing. */
TRANSMIT_INCOMPLETE, /** More data remaining to write. */
TRANSMIT_SOFT_ERROR, /** Can't write any more right now. */
TRANSMIT_HARD_ERROR /** Can't write (c->state is set to conn_closing) */
};
/* Default methods to read from/ write to a socket */
ssize_t tcp_read(conn *c, void *buf, size_t count) {
assert (c != NULL);
return read(c->sfd, buf, count);
}
ssize_t tcp_sendmsg(conn *c, struct msghdr *msg, int flags) {
assert (c != NULL);
return sendmsg(c->sfd, msg, flags);
}
ssize_t tcp_write(conn *c, void *buf, size_t count) {
assert (c != NULL);
return write(c->sfd, buf, count);
}
static enum transmit_result transmit(conn *c);
/* This reduces the latency without adding lots of extra wiring to be able to
* notify the listener thread of when to listen again.
* Also, the clock timer could be broken out into its own thread and we
* can block the listener via a condition.
*/
static volatile bool allow_new_conns = true;
static bool stop_main_loop = false;
static struct event maxconnsevent;
static void maxconns_handler(const int fd, const short which, void *arg) {
struct timeval t = {.tv_sec = 0, .tv_usec = 10000};
if (fd == -42 || allow_new_conns == false) {
/* reschedule in 10ms if we need to keep polling */
evtimer_set(&maxconnsevent, maxconns_handler, 0);
event_base_set(main_base, &maxconnsevent);
evtimer_add(&maxconnsevent, &t);
} else {
evtimer_del(&maxconnsevent);
accept_new_conns(true);
}
}
#define REALTIME_MAXDELTA 60*60*24*30
/* Negative exptimes can underflow and end up immortal. realtime() will
immediately expire values that are greater than REALTIME_MAXDELTA, but less
than process_started, so lets aim for that. */
#define EXPTIME_TO_POSITIVE_TIME(exptime) (exptime < 0) ? \
REALTIME_MAXDELTA + 1 : exptime
/*
* given time value that's either unix time or delta from current unix time, return
* unix time. Use the fact that delta can't exceed one month (and real time value can't
* be that low).
*/
static rel_time_t realtime(const time_t exptime) {
/* no. of seconds in 30 days - largest possible delta exptime */
if (exptime == 0) return 0; /* 0 means never expire */
if (exptime > REALTIME_MAXDELTA) {
/* if item expiration is at/before the server started, give it an
expiration time of 1 second after the server started.
(because 0 means don't expire). without this, we'd
underflow and wrap around to some large value way in the
future, effectively making items expiring in the past
really expiring never */
if (exptime <= process_started)
return (rel_time_t)1;
return (rel_time_t)(exptime - process_started);
} else {
return (rel_time_t)(exptime + current_time);
}
}
static void stats_init(void) {
memset(&stats, 0, sizeof(struct stats));
memset(&stats_state, 0, sizeof(struct stats_state));
stats_state.accepting_conns = true; /* assuming we start in this state. */
/* make the time we started always be 2 seconds before we really
did, so time(0) - time.started is never zero. if so, things
like 'settings.oldest_live' which act as booleans as well as
values are now false in boolean context... */
process_started = time(0) - ITEM_UPDATE_INTERVAL - 2;
stats_prefix_init(settings.prefix_delimiter);
}
static void stats_reset(void) {
STATS_LOCK();
memset(&stats, 0, sizeof(struct stats));
stats_prefix_clear();
STATS_UNLOCK();
threadlocal_stats_reset();
item_stats_reset();
}
static void settings_init(void) {
settings.use_cas = true;
settings.access = 0700;
settings.port = 11211;
settings.udpport = 0;
#ifdef TLS
settings.ssl_enabled = false;
settings.ssl_ctx = NULL;
settings.ssl_chain_cert = NULL;
settings.ssl_key = NULL;
settings.ssl_verify_mode = SSL_VERIFY_NONE;
settings.ssl_keyformat = SSL_FILETYPE_PEM;
settings.ssl_ciphers = NULL;
settings.ssl_ca_cert = NULL;
settings.ssl_last_cert_refresh_time = current_time;
settings.ssl_wbuf_size = 16 * 1024; // default is 16KB (SSL max frame size is 17KB)
#endif
/* By default this string should be NULL for getaddrinfo() */
settings.inter = NULL;
settings.maxbytes = 64 * 1024 * 1024; /* default is 64MB */
settings.maxconns = 1024; /* to limit connections-related memory to about 5MB */
settings.verbose = 0;
settings.oldest_live = 0;
settings.oldest_cas = 0; /* supplements accuracy of oldest_live */
settings.evict_to_free = 1; /* push old items out of cache when memory runs out */
settings.socketpath = NULL; /* by default, not using a unix socket */
settings.auth_file = NULL; /* by default, not using ASCII authentication tokens */
settings.factor = 1.25;
settings.chunk_size = 48; /* space for a modest key and value */
settings.num_threads = 4; /* N workers */
settings.num_threads_per_udp = 0;
settings.prefix_delimiter = ':';
settings.detail_enabled = 0;
settings.reqs_per_event = 20;
settings.backlog = 1024;
settings.binding_protocol = negotiating_prot;
settings.item_size_max = 1024 * 1024; /* The famous 1MB upper limit. */
settings.slab_page_size = 1024 * 1024; /* chunks are split from 1MB pages. */
settings.slab_chunk_size_max = settings.slab_page_size / 2;
settings.sasl = false;
settings.maxconns_fast = true;
settings.lru_crawler = false;
settings.lru_crawler_sleep = 100;
settings.lru_crawler_tocrawl = 0;
settings.lru_maintainer_thread = false;
settings.lru_segmented = true;
settings.hot_lru_pct = 20;
settings.warm_lru_pct = 40;
settings.hot_max_factor = 0.2;
settings.warm_max_factor = 2.0;
settings.temp_lru = false;
settings.temporary_ttl = 61;
settings.idle_timeout = 0; /* disabled */
settings.hashpower_init = 0;
settings.slab_reassign = true;
settings.slab_automove = 1;
settings.slab_automove_ratio = 0.8;
settings.slab_automove_window = 30;
settings.shutdown_command = false;
settings.tail_repair_time = TAIL_REPAIR_TIME_DEFAULT;
settings.flush_enabled = true;
settings.dump_enabled = true;
settings.crawls_persleep = 1000;
settings.logger_watcher_buf_size = LOGGER_WATCHER_BUF_SIZE;
settings.logger_buf_size = LOGGER_BUF_SIZE;
settings.drop_privileges = false;
settings.watch_enabled = true;
settings.resp_obj_mem_limit = 0;
settings.read_buf_mem_limit = 0;
#ifdef MEMCACHED_DEBUG
settings.relaxed_privileges = false;
#endif
}
extern pthread_mutex_t conn_lock;
/* Connection timeout thread bits */
static pthread_t conn_timeout_tid;
static int do_run_conn_timeout_thread;
#define CONNS_PER_SLICE 100
#define TIMEOUT_MSG_SIZE (1 + sizeof(int))
static void *conn_timeout_thread(void *arg) {
int i;
conn *c;
char buf[TIMEOUT_MSG_SIZE];
rel_time_t oldest_last_cmd;
int sleep_time;
useconds_t timeslice = 1000000 / (max_fds / CONNS_PER_SLICE);
while(do_run_conn_timeout_thread) {
if (settings.verbose > 2)
fprintf(stderr, "idle timeout thread at top of connection list\n");
oldest_last_cmd = current_time;
for (i = 0; i < max_fds; i++) {
if ((i % CONNS_PER_SLICE) == 0) {
if (settings.verbose > 2)
fprintf(stderr, "idle timeout thread sleeping for %ulus\n",
(unsigned int)timeslice);
usleep(timeslice);
}
if (!conns[i])
continue;
c = conns[i];
if (!IS_TCP(c->transport))
continue;
if (c->state != conn_new_cmd && c->state != conn_read)
continue;
if ((current_time - c->last_cmd_time) > settings.idle_timeout) {
buf[0] = 't';
memcpy(&buf[1], &i, sizeof(int));
if (write(c->thread->notify_send_fd, buf, TIMEOUT_MSG_SIZE)
!= TIMEOUT_MSG_SIZE)
perror("Failed to write timeout to notify pipe");
} else {
if (c->last_cmd_time < oldest_last_cmd)
oldest_last_cmd = c->last_cmd_time;
}
}
/* This is the soonest we could have another connection time out */
sleep_time = settings.idle_timeout - (current_time - oldest_last_cmd) + 1;
if (sleep_time <= 0)
sleep_time = 1;
if (settings.verbose > 2)
fprintf(stderr,
"idle timeout thread finished pass, sleeping for %ds\n",
sleep_time);
usleep((useconds_t) sleep_time * 1000000);
}
return NULL;
}
static int start_conn_timeout_thread() {
int ret;
if (settings.idle_timeout == 0)
return -1;
do_run_conn_timeout_thread = 1;
if ((ret = pthread_create(&conn_timeout_tid, NULL,
conn_timeout_thread, NULL)) != 0) {
fprintf(stderr, "Can't create idle connection timeout thread: %s\n",
strerror(ret));
return -1;
}
return 0;
}
int stop_conn_timeout_thread(void) {
if (!do_run_conn_timeout_thread)
return -1;
do_run_conn_timeout_thread = 0;
pthread_join(conn_timeout_tid, NULL);
return 0;
}
/*
* read buffer cache helper functions
*/
static void rbuf_release(conn *c) {
if (c->rbuf != NULL && c->rbytes == 0 && !IS_UDP(c->transport)) {
if (c->rbuf_malloced) {
free(c->rbuf);
c->rbuf_malloced = false;
} else {
do_cache_free(c->thread->rbuf_cache, c->rbuf);
}
c->rsize = 0;
c->rbuf = NULL;
c->rcurr = NULL;
}
}
static bool rbuf_alloc(conn *c) {
if (c->rbuf == NULL) {
c->rbuf = do_cache_alloc(c->thread->rbuf_cache);
if (!c->rbuf) {
THR_STATS_LOCK(c);
c->thread->stats.read_buf_oom++;
THR_STATS_UNLOCK(c);
return false;
}
c->rsize = READ_BUFFER_SIZE;
c->rcurr = c->rbuf;
}
return true;
}
// Just for handling huge ASCII multigets.
// The previous system was essentially the same; realloc'ing until big enough,
// then realloc'ing back down after the request finished.
static bool rbuf_switch_to_malloc(conn *c) {
// Might as well start with x2 and work from there.
size_t size = c->rsize * 2;
char *tmp = malloc(size);
if (!tmp)
return false;
do_cache_free(c->thread->rbuf_cache, c->rbuf);
memcpy(tmp, c->rcurr, c->rbytes);
c->rcurr = c->rbuf = tmp;
c->rsize = size;
c->rbuf_malloced = true;
return true;
}
/*
* Initializes the connections array. We don't actually allocate connection
* structures until they're needed, so as to avoid wasting memory when the
* maximum connection count is much higher than the actual number of
* connections.
*
* This does end up wasting a few pointers' worth of memory for FDs that are
* used for things other than connections, but that's worth it in exchange for
* being able to directly index the conns array by FD.
*/
static void conn_init(void) {
/* We're unlikely to see an FD much higher than maxconns. */
int next_fd = dup(1);
if (next_fd < 0) {
perror("Failed to duplicate file descriptor\n");
exit(1);
}
int headroom = 10; /* account for extra unexpected open FDs */
struct rlimit rl;
max_fds = settings.maxconns + headroom + next_fd;
/* But if possible, get the actual highest FD we can possibly ever see. */
if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
max_fds = rl.rlim_max;
} else {
fprintf(stderr, "Failed to query maximum file descriptor; "
"falling back to maxconns\n");
}
close(next_fd);
if ((conns = calloc(max_fds, sizeof(conn *))) == NULL) {
fprintf(stderr, "Failed to allocate connection structures\n");
/* This is unrecoverable so bail out early. */
exit(1);
}
}
static const char *prot_text(enum protocol prot) {
char *rv = "unknown";
switch(prot) {
case ascii_prot:
rv = "ascii";
break;
case binary_prot:
rv = "binary";
break;
case negotiating_prot:
rv = "auto-negotiate";
break;
}
return rv;
}
void conn_close_idle(conn *c) {
if (settings.idle_timeout > 0 &&
(current_time - c->last_cmd_time) > settings.idle_timeout) {
if (c->state != conn_new_cmd && c->state != conn_read) {
if (settings.verbose > 1)
fprintf(stderr,
"fd %d wants to timeout, but isn't in read state", c->sfd);
return;
}
if (settings.verbose > 1)
fprintf(stderr, "Closing idle fd %d\n", c->sfd);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.idle_kicks++;
pthread_mutex_unlock(&c->thread->stats.mutex);
conn_set_state(c, conn_closing);
drive_machine(c);
}
}
/* bring conn back from a sidethread. could have had its event base moved. */
void conn_worker_readd(conn *c) {
c->ev_flags = EV_READ | EV_PERSIST;
event_set(&c->event, c->sfd, c->ev_flags, event_handler, (void *)c);
event_base_set(c->thread->base, &c->event);
// TODO: call conn_cleanup/fail/etc
if (event_add(&c->event, 0) == -1) {
perror("event_add");
}
// side thread wanted us to close immediately.
if (c->state == conn_closing) {
drive_machine(c);
return;
}
c->state = conn_new_cmd;
#ifdef EXTSTORE
// If we had IO objects, process
if (c->io_wraplist) {
//assert(c->io_wrapleft == 0); // assert no more to process
conn_set_state(c, conn_mwrite);
drive_machine(c);
}
#endif
}
conn *conn_new(const int sfd, enum conn_states init_state,
const int event_flags,
const int read_buffer_size, enum network_transport transport,
struct event_base *base, void *ssl) {
conn *c;
assert(sfd >= 0 && sfd < max_fds);
c = conns[sfd];
if (NULL == c) {
if (!(c = (conn *)calloc(1, sizeof(conn)))) {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
fprintf(stderr, "Failed to allocate connection object\n");
return NULL;
}
MEMCACHED_CONN_CREATE(c);
c->read = NULL;
c->sendmsg = NULL;
c->write = NULL;
c->rbuf = NULL;
c->rsize = read_buffer_size;
// UDP connections use a persistent static buffer.
if (c->rsize) {
c->rbuf = (char *)malloc((size_t)c->rsize);
}
if (c->rsize && c->rbuf == NULL) {
conn_free(c);
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
fprintf(stderr, "Failed to allocate buffers for connection\n");
return NULL;
}
STATS_LOCK();
stats_state.conn_structs++;
STATS_UNLOCK();
c->sfd = sfd;
conns[sfd] = c;
}
c->transport = transport;
c->protocol = settings.binding_protocol;
/* unix socket mode doesn't need this, so zeroed out. but why
* is this done for every command? presumably for UDP
* mode. */
if (!settings.socketpath) {
c->request_addr_size = sizeof(c->request_addr);
} else {
c->request_addr_size = 0;
}
if (transport == tcp_transport && init_state == conn_new_cmd) {
if (getpeername(sfd, (struct sockaddr *) &c->request_addr,
&c->request_addr_size)) {
perror("getpeername");
memset(&c->request_addr, 0, sizeof(c->request_addr));
}
}
if (settings.verbose > 1) {
if (init_state == conn_listening) {
fprintf(stderr, "<%d server listening (%s)\n", sfd,
prot_text(c->protocol));
} else if (IS_UDP(transport)) {
fprintf(stderr, "<%d server listening (udp)\n", sfd);
} else if (c->protocol == negotiating_prot) {
fprintf(stderr, "<%d new auto-negotiating client connection\n",
sfd);
} else if (c->protocol == ascii_prot) {
fprintf(stderr, "<%d new ascii client connection.\n", sfd);
} else if (c->protocol == binary_prot) {
fprintf(stderr, "<%d new binary client connection.\n", sfd);
} else {
fprintf(stderr, "<%d new unknown (%d) client connection\n",
sfd, c->protocol);
assert(false);
}
}
#ifdef TLS
c->ssl = NULL;
c->ssl_wbuf = NULL;
c->ssl_enabled = false;
#endif
c->state = init_state;
c->rlbytes = 0;
c->cmd = -1;
c->rbytes = 0;
c->rcurr = c->rbuf;
c->ritem = 0;
c->rbuf_malloced = false;
c->sasl_started = false;
c->set_stale = false;
c->mset_res = false;
c->close_after_write = false;
c->last_cmd_time = current_time; /* initialize for idle kicker */
#ifdef EXTSTORE
c->io_wraplist = NULL;
c->io_wrapleft = 0;
#endif
c->item = 0;
c->noreply = false;
#ifdef TLS
if (ssl) {
c->ssl = (SSL*)ssl;
c->read = ssl_read;
c->sendmsg = ssl_sendmsg;
c->write = ssl_write;
c->ssl_enabled = true;
SSL_set_info_callback(c->ssl, ssl_callback);
} else
#else
// This must be NULL if TLS is not enabled.
assert(ssl == NULL);
#endif
{
c->read = tcp_read;
c->sendmsg = tcp_sendmsg;
c->write = tcp_write;
}
if (IS_UDP(transport)) {
c->try_read_command = try_read_command_udp;
} else {
switch (c->protocol) {
case ascii_prot:
if (settings.auth_file == NULL) {
c->authenticated = true;
c->try_read_command = try_read_command_ascii;
} else {
c->authenticated = false;
c->try_read_command = try_read_command_asciiauth;
}
break;
case binary_prot:
// binprot handles its own authentication via SASL parsing.
c->authenticated = false;
c->try_read_command = try_read_command_binary;
break;
case negotiating_prot:
c->try_read_command = try_read_command_negotiate;
break;
}
}
event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
event_base_set(base, &c->event);
c->ev_flags = event_flags;
if (event_add(&c->event, 0) == -1) {
perror("event_add");
return NULL;
}
STATS_LOCK();
stats_state.curr_conns++;
stats.total_conns++;
STATS_UNLOCK();
MEMCACHED_CONN_ALLOCATE(c->sfd);
return c;
}
#ifdef EXTSTORE
static void recache_or_free(conn *c, io_wrap *wrap) {
item *it;
it = (item *)wrap->io.buf;
bool do_free = true;
if (wrap->active) {
// If request never dispatched, free the read buffer but leave the
// item header alone.
do_free = false;
size_t ntotal = ITEM_ntotal(wrap->hdr_it);
slabs_free(it, ntotal, slabs_clsid(ntotal));
c->io_wrapleft--;
assert(c->io_wrapleft >= 0);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_aborted_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
} else if (wrap->miss) {
// If request was ultimately a miss, unlink the header.
do_free = false;
size_t ntotal = ITEM_ntotal(wrap->hdr_it);
item_unlink(wrap->hdr_it);
slabs_free(it, ntotal, slabs_clsid(ntotal));
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.miss_from_extstore++;
if (wrap->badcrc)
c->thread->stats.badcrc_from_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
} else if (settings.ext_recache_rate) {
// hashvalue is cuddled during store
uint32_t hv = (uint32_t)it->time;
// opt to throw away rather than wait on a lock.
void *hold_lock = item_trylock(hv);
if (hold_lock != NULL) {
item *h_it = wrap->hdr_it;
uint8_t flags = ITEM_LINKED|ITEM_FETCHED|ITEM_ACTIVE;
// Item must be recently hit at least twice to recache.
if (((h_it->it_flags & flags) == flags) &&
h_it->time > current_time - ITEM_UPDATE_INTERVAL &&
c->recache_counter++ % settings.ext_recache_rate == 0) {
do_free = false;
// In case it's been updated.
it->exptime = h_it->exptime;
it->it_flags &= ~ITEM_LINKED;
it->refcount = 0;
it->h_next = NULL; // might not be necessary.
STORAGE_delete(c->thread->storage, h_it);
item_replace(h_it, it, hv);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.recache_from_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
}
if (hold_lock)
item_trylock_unlock(hold_lock);
}
if (do_free)
slabs_free(it, ITEM_ntotal(it), ITEM_clsid(it));
wrap->io.buf = NULL; // sanity.
wrap->io.next = NULL;
wrap->next = NULL;
wrap->active = false;
// TODO: reuse lock and/or hv.
item_remove(wrap->hdr_it);
}
#endif
static void conn_release_items(conn *c) {
assert(c != NULL);
if (c->item) {
item_remove(c->item);
c->item = 0;
}
#ifdef EXTSTORE
if (c->io_wraplist) {
io_wrap *tmp = c->io_wraplist;
while (tmp) {
io_wrap *next = tmp->next;
recache_or_free(c, tmp);
// malloc'ed iovec list used for chunked extstore fetches.
if (tmp->io.iov) {
free(tmp->io.iov);
tmp->io.iov = NULL;
}
do_cache_free(c->thread->io_cache, tmp); // lockless
tmp = next;
}
c->io_wraplist = NULL;
}
#endif
// Cull any unsent responses.
if (c->resp_head) {
mc_resp *resp = c->resp_head;
// r_f() handles the chain maintenance.
while (resp) {
// temporary by default. hide behind a debug flag in the future:
// double free detection. Transmit loops can drop out early, but
// here we could infinite loop.
if (resp->free) {
fprintf(stderr, "ERROR: double free detected during conn_release_items(): [%d] [%s]\n",
c->sfd, c->protocol == binary_prot ? "binary" : "ascii");
// Since this is a critical failure, just leak the memory.
// If these errors are seen, an abort() can be used instead.
c->resp_head = NULL;
c->resp = NULL;
break;
}
resp = resp_finish(c, resp);
}
}
}
static void conn_cleanup(conn *c) {
assert(c != NULL);
conn_release_items(c);
if (c->sasl_conn) {
assert(settings.sasl);
sasl_dispose(&c->sasl_conn);
c->sasl_conn = NULL;
}
if (IS_UDP(c->transport)) {
conn_set_state(c, conn_read);
}
}
/*
* Frees a connection.
*/
void conn_free(conn *c) {
if (c) {
assert(c != NULL);
assert(c->sfd >= 0 && c->sfd < max_fds);
MEMCACHED_CONN_DESTROY(c);
conns[c->sfd] = NULL;
if (c->rbuf)
free(c->rbuf);
#ifdef TLS
if (c->ssl_wbuf)
c->ssl_wbuf = NULL;
#endif
free(c);
}
}
static void conn_close(conn *c) {
assert(c != NULL);
/* delete the event, the socket and the conn */
event_del(&c->event);
if (settings.verbose > 1)
fprintf(stderr, "<%d connection closed.\n", c->sfd);
conn_cleanup(c);
// force release of read buffer.
if (c->thread) {
c->rbytes = 0;
rbuf_release(c);
}
MEMCACHED_CONN_RELEASE(c->sfd);
conn_set_state(c, conn_closed);
#ifdef TLS
if (c->ssl) {
SSL_shutdown(c->ssl);
SSL_free(c->ssl);
}
#endif
close(c->sfd);
pthread_mutex_lock(&conn_lock);
allow_new_conns = true;
pthread_mutex_unlock(&conn_lock);
STATS_LOCK();
stats_state.curr_conns--;
STATS_UNLOCK();
return;
}
/**
* Convert a state name to a human readable form.
*/
static const char *state_text(enum conn_states state) {
const char* const statenames[] = { "conn_listening",
"conn_new_cmd",
"conn_waiting",
"conn_read",
"conn_parse_cmd",
"conn_write",
"conn_nread",
"conn_swallow",
"conn_closing",
"conn_mwrite",
"conn_closed",
"conn_watch" };
return statenames[state];
}
/*
* Sets a connection's current state in the state machine. Any special
* processing that needs to happen on certain state transitions can
* happen here.
*/
static void conn_set_state(conn *c, enum conn_states state) {
assert(c != NULL);
assert(state >= conn_listening && state < conn_max_state);
if (state != c->state) {
if (settings.verbose > 2) {
fprintf(stderr, "%d: going from %s to %s\n",
c->sfd, state_text(c->state),
state_text(state));
}
if (state == conn_write || state == conn_mwrite) {
MEMCACHED_PROCESS_COMMAND_END(c->sfd, c->resp->wbuf, c->resp->wbytes);
}
c->state = state;
}
}
/*
* response object helper functions
*/
static void resp_reset(mc_resp *resp) {
if (resp->item) {
item_remove(resp->item);
resp->item = NULL;
}
if (resp->write_and_free) {
free(resp->write_and_free);
resp->write_and_free = NULL;
}
resp->wbytes = 0;
resp->tosend = 0;
resp->iovcnt = 0;
resp->chunked_data_iov = 0;
resp->chunked_total = 0;
resp->skip = false;
}
static void resp_add_iov(mc_resp *resp, const void *buf, int len) {
assert(resp->iovcnt < MC_RESP_IOVCOUNT);
int x = resp->iovcnt;
resp->iov[x].iov_base = (void *)buf;
resp->iov[x].iov_len = len;
resp->iovcnt++;
resp->tosend += len;
}
// Notes that an IOV should be handled as a chunked item header.
// TODO: I'm hoping this isn't a permanent abstraction while I learn what the
// API should be.
static void resp_add_chunked_iov(mc_resp *resp, const void *buf, int len) {
resp->chunked_data_iov = resp->iovcnt;
resp->chunked_total = len;
resp_add_iov(resp, buf, len);
}
static bool resp_start(conn *c) {
mc_resp *resp = do_cache_alloc(c->thread->resp_cache);
if (!resp) {
THR_STATS_LOCK(c);
c->thread->stats.response_obj_oom++;
THR_STATS_UNLOCK(c);
return false;
}
// FIXME: make wbuf indirect or use offsetof to zero up until wbuf
memset(resp, 0, sizeof(*resp));
if (!c->resp_head) {
c->resp_head = resp;
}
if (!c->resp) {
c->resp = resp;
} else {
c->resp->next = resp;
c->resp = resp;
}
if (IS_UDP(c->transport)) {
// need to hold on to some data for async responses.
c->resp->request_id = c->request_id;
c->resp->request_addr = c->request_addr;
c->resp->request_addr_size = c->request_addr_size;
}
return true;
}
// returns next response in chain.
static mc_resp* resp_finish(conn *c, mc_resp *resp) {
mc_resp *next = resp->next;
if (resp->item) {
// TODO: cache hash value in resp obj?
item_remove(resp->item);
resp->item = NULL;
}
if (resp->write_and_free) {
free(resp->write_and_free);
}
if (c->resp_head == resp) {
c->resp_head = next;
}
if (c->resp == resp) {
c->resp = NULL;
}
resp->free = true;
do_cache_free(c->thread->resp_cache, resp);
return next;
}
// tells if connection has a depth of response objects to process.
static bool resp_has_stack(conn *c) {
return c->resp_head->next != NULL ? true : false;
}
static void out_string(conn *c, const char *str) {
size_t len;
mc_resp *resp = c->resp;
assert(c != NULL);
// if response was original filled with something, but we're now writing
// out an error or similar, have to reset the object first.
// TODO: since this is often redundant with allocation, how many callers
// are actually requiring it be reset? Can we fast test by just looking at
// tosend and reset if nonzero?
resp_reset(resp);
if (c->noreply) {
// TODO: just invalidate the response since nothing's been attempted
// to send yet?
resp->skip = true;
if (settings.verbose > 1)
fprintf(stderr, ">%d NOREPLY %s\n", c->sfd, str);
conn_set_state(c, conn_new_cmd);
return;
}
if (settings.verbose > 1)
fprintf(stderr, ">%d %s\n", c->sfd, str);
// Fill response object with static string.
len = strlen(str);
if ((len + 2) > WRITE_BUFFER_SIZE) {
/* ought to be always enough. just fail for simplicity */
str = "SERVER_ERROR output line too long";
len = strlen(str);
}
memcpy(resp->wbuf, str, len);
memcpy(resp->wbuf + len, "\r\n", 2);
resp_add_iov(resp, resp->wbuf, len + 2);
conn_set_state(c, conn_new_cmd);
return;
}
// For metaget-style ASCII commands. Ignores noreply, ensuring clients see
// protocol level errors.
static void out_errstring(conn *c, const char *str) {
c->noreply = false;
out_string(c, str);
}
/*
* Outputs a protocol-specific "out of memory" error. For ASCII clients,
* this is equivalent to out_string().
*/
static void out_of_memory(conn *c, char *ascii_error) {
const static char error_prefix[] = "SERVER_ERROR ";
const static int error_prefix_len = sizeof(error_prefix) - 1;
if (c->protocol == binary_prot) {
/* Strip off the generic error prefix; it's irrelevant in binary */
if (!strncmp(ascii_error, error_prefix, error_prefix_len)) {
ascii_error += error_prefix_len;
}
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, ascii_error, 0);
} else {
out_string(c, ascii_error);
}
}
/*
* we get here after reading the value in set/add/replace commands. The command
* has been stored in c->cmd, and the item is ready in c->item.
*/
static void complete_nread_ascii(conn *c) {
assert(c != NULL);
item *it = c->item;
int comm = c->cmd;
enum store_item_type ret;
bool is_valid = false;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].set_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if ((it->it_flags & ITEM_CHUNKED) == 0) {
if (strncmp(ITEM_data(it) + it->nbytes - 2, "\r\n", 2) == 0) {
is_valid = true;
}
} else {
char buf[2];
/* should point to the final item chunk */
item_chunk *ch = (item_chunk *) c->ritem;
assert(ch->used != 0);
/* :( We need to look at the last two bytes. This could span two
* chunks.
*/
if (ch->used > 1) {
buf[0] = ch->data[ch->used - 2];
buf[1] = ch->data[ch->used - 1];
} else {
assert(ch->prev);
assert(ch->used == 1);
buf[0] = ch->prev->data[ch->prev->used - 1];
buf[1] = ch->data[ch->used - 1];
}
if (strncmp(buf, "\r\n", 2) == 0) {
is_valid = true;
} else {
assert(1 == 0);
}
}
if (!is_valid) {
// metaset mode always returns errors.
if (c->mset_res) {
c->noreply = false;
}
out_string(c, "CLIENT_ERROR bad data chunk");
} else {
ret = store_item(it, comm, c);
#ifdef ENABLE_DTRACE
uint64_t cas = ITEM_get_cas(it);
switch (c->cmd) {
case NREAD_ADD:
MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_REPLACE:
MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_APPEND:
MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_PREPEND:
MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_SET:
MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_CAS:
MEMCACHED_COMMAND_CAS(c->sfd, ITEM_key(it), it->nkey, it->nbytes,
cas);
break;
}
#endif
if (c->mset_res) {
// Replace the status code in the response.
// Rest was prepared during mset parsing.
mc_resp *resp = c->resp;
conn_set_state(c, conn_new_cmd);
switch (ret) {
case STORED:
memcpy(resp->wbuf, "OK ", 3);
// Only place noreply is used for meta cmds is a nominal response.
if (c->noreply) {
resp->skip = true;
}
break;
case EXISTS:
memcpy(resp->wbuf, "EX ", 3);
break;
case NOT_FOUND:
memcpy(resp->wbuf, "NF ", 3);
break;
case NOT_STORED:
memcpy(resp->wbuf, "NS ", 3);
break;
default:
c->noreply = false;
out_string(c, "SERVER_ERROR Unhandled storage type.");
}
} else {
switch (ret) {
case STORED:
out_string(c, "STORED");
break;
case EXISTS:
out_string(c, "EXISTS");
break;
case NOT_FOUND:
out_string(c, "NOT_FOUND");
break;
case NOT_STORED:
out_string(c, "NOT_STORED");
break;
default:
out_string(c, "SERVER_ERROR Unhandled storage type.");
}
}
}
c->set_stale = false; /* force flag to be off just in case */
c->mset_res = false;
item_remove(c->item); /* release the c->item reference */
c->item = 0;
}
/**
* get a pointer to the key in this request
*/
static char* binary_get_key(conn *c) {
return c->rcurr - (c->binary_header.request.keylen);
}
static void add_bin_header(conn *c, uint16_t err, uint8_t hdr_len, uint16_t key_len, uint32_t body_len) {
protocol_binary_response_header* header;
mc_resp *resp = c->resp;
assert(c);
resp_reset(resp);
header = (protocol_binary_response_header *)resp->wbuf;
header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
header->response.opcode = c->binary_header.request.opcode;
header->response.keylen = (uint16_t)htons(key_len);
header->response.extlen = (uint8_t)hdr_len;
header->response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
header->response.status = (uint16_t)htons(err);
header->response.bodylen = htonl(body_len);
header->response.opaque = c->opaque;
header->response.cas = htonll(c->cas);
if (settings.verbose > 1) {
int ii;
fprintf(stderr, ">%d Writing bin response:", c->sfd);
for (ii = 0; ii < sizeof(header->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n>%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", header->bytes[ii]);
}
fprintf(stderr, "\n");
}
resp->wbytes = sizeof(header->response);
resp_add_iov(resp, resp->wbuf, resp->wbytes);
}
/**
* Writes a binary error response. If errstr is supplied, it is used as the
* error text; otherwise a generic description of the error status code is
* included.
*/
static void write_bin_error(conn *c, protocol_binary_response_status err,
const char *errstr, int swallow) {
size_t len;
if (!errstr) {
switch (err) {
case PROTOCOL_BINARY_RESPONSE_ENOMEM:
errstr = "Out of memory";
break;
case PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND:
errstr = "Unknown command";
break;
case PROTOCOL_BINARY_RESPONSE_KEY_ENOENT:
errstr = "Not found";
break;
case PROTOCOL_BINARY_RESPONSE_EINVAL:
errstr = "Invalid arguments";
break;
case PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS:
errstr = "Data exists for key.";
break;
case PROTOCOL_BINARY_RESPONSE_E2BIG:
errstr = "Too large.";
break;
case PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL:
errstr = "Non-numeric server-side value for incr or decr";
break;
case PROTOCOL_BINARY_RESPONSE_NOT_STORED:
errstr = "Not stored.";
break;
case PROTOCOL_BINARY_RESPONSE_AUTH_ERROR:
errstr = "Auth failure.";
break;
default:
assert(false);
errstr = "UNHANDLED ERROR";
fprintf(stderr, ">%d UNHANDLED ERROR: %d\n", c->sfd, err);
}
}
if (settings.verbose > 1) {
fprintf(stderr, ">%d Writing an error: %s\n", c->sfd, errstr);
}
len = strlen(errstr);
add_bin_header(c, err, 0, 0, len);
if (len > 0) {
resp_add_iov(c->resp, errstr, len);
}
if (swallow > 0) {
c->sbytes = swallow;
conn_set_state(c, conn_swallow);
} else {
conn_set_state(c, conn_mwrite);
}
}
/* Form and send a response to a command over the binary protocol */
static void write_bin_response(conn *c, void *d, int hlen, int keylen, int dlen) {
if (!c->noreply || c->cmd == PROTOCOL_BINARY_CMD_GET ||
c->cmd == PROTOCOL_BINARY_CMD_GETK) {
add_bin_header(c, 0, hlen, keylen, dlen);
mc_resp *resp = c->resp;
if (dlen > 0) {
resp_add_iov(resp, d, dlen);
}
}
conn_set_state(c, conn_new_cmd);
}
static void complete_incr_bin(conn *c, char *extbuf) {
item *it;
char *key;
size_t nkey;
/* Weird magic in add_delta forces me to pad here */
char tmpbuf[INCR_MAX_STORAGE_LEN];
uint64_t cas = 0;
protocol_binary_response_incr* rsp = (protocol_binary_response_incr*)c->resp->wbuf;
protocol_binary_request_incr* req = (void *)extbuf;
assert(c != NULL);
//assert(c->wsize >= sizeof(*rsp));
/* fix byteorder in the request */
req->message.body.delta = ntohll(req->message.body.delta);
req->message.body.initial = ntohll(req->message.body.initial);
req->message.body.expiration = ntohl(req->message.body.expiration);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
if (settings.verbose > 1) {
int i;
fprintf(stderr, "incr ");
for (i = 0; i < nkey; i++) {
fprintf(stderr, "%c", key[i]);
}
fprintf(stderr, " %lld, %llu, %d\n",
(long long)req->message.body.delta,
(long long)req->message.body.initial,
req->message.body.expiration);
}
if (c->binary_header.request.cas != 0) {
cas = c->binary_header.request.cas;
}
switch(add_delta(c, key, nkey, c->cmd == PROTOCOL_BINARY_CMD_INCREMENT,
req->message.body.delta, tmpbuf,
&cas)) {
case OK:
rsp->message.body.value = htonll(strtoull(tmpbuf, NULL, 10));
if (cas) {
c->cas = cas;
}
write_bin_response(c, &rsp->message.body, 0, 0,
sizeof(rsp->message.body.value));
break;
case NON_NUMERIC:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL, NULL, 0);
break;
case EOM:
out_of_memory(c, "SERVER_ERROR Out of memory incrementing value");
break;
case DELTA_ITEM_NOT_FOUND:
if (req->message.body.expiration != 0xffffffff) {
/* Save some room for the response */
rsp->message.body.value = htonll(req->message.body.initial);
snprintf(tmpbuf, INCR_MAX_STORAGE_LEN, "%llu",
(unsigned long long)req->message.body.initial);
int res = strlen(tmpbuf);
it = item_alloc(key, nkey, 0, realtime(req->message.body.expiration),
res + 2);
if (it != NULL) {
memcpy(ITEM_data(it), tmpbuf, res);
memcpy(ITEM_data(it) + res, "\r\n", 2);
if (store_item(it, NREAD_ADD, c)) {
c->cas = ITEM_get_cas(it);
write_bin_response(c, &rsp->message.body, 0, 0, sizeof(rsp->message.body.value));
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_NOT_STORED,
NULL, 0);
}
item_remove(it); /* release our reference */
} else {
out_of_memory(c,
"SERVER_ERROR Out of memory allocating new item");
}
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
}
break;
case DELTA_ITEM_CAS_MISMATCH:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
break;
}
}
static void complete_update_bin(conn *c) {
protocol_binary_response_status eno = PROTOCOL_BINARY_RESPONSE_EINVAL;
enum store_item_type ret = NOT_STORED;
assert(c != NULL);
item *it = c->item;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].set_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
/* We don't actually receive the trailing two characters in the bin
* protocol, so we're going to just set them here */
if ((it->it_flags & ITEM_CHUNKED) == 0) {
*(ITEM_data(it) + it->nbytes - 2) = '\r';
*(ITEM_data(it) + it->nbytes - 1) = '\n';
} else {
assert(c->ritem);
item_chunk *ch = (item_chunk *) c->ritem;
if (ch->size == ch->used)
ch = ch->next;
assert(ch->size - ch->used >= 2);
ch->data[ch->used] = '\r';
ch->data[ch->used + 1] = '\n';
ch->used += 2;
}
ret = store_item(it, c->cmd, c);
#ifdef ENABLE_DTRACE
uint64_t cas = ITEM_get_cas(it);
switch (c->cmd) {
case NREAD_ADD:
MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_REPLACE:
MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_APPEND:
MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_PREPEND:
MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_SET:
MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
}
#endif
switch (ret) {
case STORED:
/* Stored */
write_bin_response(c, NULL, 0, 0, 0);
break;
case EXISTS:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
break;
case NOT_FOUND:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
break;
case NOT_STORED:
case TOO_LARGE:
case NO_MEMORY:
if (c->cmd == NREAD_ADD) {
eno = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
} else if(c->cmd == NREAD_REPLACE) {
eno = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
} else {
eno = PROTOCOL_BINARY_RESPONSE_NOT_STORED;
}
write_bin_error(c, eno, NULL, 0);
}
item_remove(c->item); /* release the c->item reference */
c->item = 0;
}
static void write_bin_miss_response(conn *c, char *key, size_t nkey) {
if (nkey) {
add_bin_header(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
0, nkey, nkey);
char *ofs = c->resp->wbuf + sizeof(protocol_binary_response_header);
memcpy(ofs, key, nkey);
resp_add_iov(c->resp, ofs, nkey);
conn_set_state(c, conn_new_cmd);
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
NULL, 0);
}
}
static void process_bin_get_or_touch(conn *c, char *extbuf) {
item *it;
protocol_binary_response_get* rsp = (protocol_binary_response_get*)c->resp->wbuf;
char* key = binary_get_key(c);
size_t nkey = c->binary_header.request.keylen;
int should_touch = (c->cmd == PROTOCOL_BINARY_CMD_TOUCH ||
c->cmd == PROTOCOL_BINARY_CMD_GAT ||
c->cmd == PROTOCOL_BINARY_CMD_GATK);
int should_return_key = (c->cmd == PROTOCOL_BINARY_CMD_GETK ||
c->cmd == PROTOCOL_BINARY_CMD_GATK);
int should_return_value = (c->cmd != PROTOCOL_BINARY_CMD_TOUCH);
bool failed = false;
if (settings.verbose > 1) {
fprintf(stderr, "<%d %s ", c->sfd, should_touch ? "TOUCH" : "GET");
if (fwrite(key, 1, nkey, stderr)) {}
fputc('\n', stderr);
}
if (should_touch) {
protocol_binary_request_touch *t = (void *)extbuf;
time_t exptime = ntohl(t->message.body.expiration);
it = item_touch(key, nkey, realtime(exptime), c);
} else {
it = item_get(key, nkey, c, DO_UPDATE);
}
if (it) {
/* the length has two unnecessary bytes ("\r\n") */
uint16_t keylen = 0;
uint32_t bodylen = sizeof(rsp->message.body) + (it->nbytes - 2);
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
} else {
c->thread->stats.get_cmds++;
c->thread->stats.lru_hits[it->slabs_clsid]++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
if (should_touch) {
MEMCACHED_COMMAND_TOUCH(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
} else {
MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
}
if (c->cmd == PROTOCOL_BINARY_CMD_TOUCH) {
bodylen -= it->nbytes - 2;
} else if (should_return_key) {
bodylen += nkey;
keylen = nkey;
}
add_bin_header(c, 0, sizeof(rsp->message.body), keylen, bodylen);
rsp->message.header.response.cas = htonll(ITEM_get_cas(it));
// add the flags
FLAGS_CONV(it, rsp->message.body.flags);
rsp->message.body.flags = htonl(rsp->message.body.flags);
resp_add_iov(c->resp, &rsp->message.body, sizeof(rsp->message.body));
if (should_return_key) {
resp_add_iov(c->resp, ITEM_key(it), nkey);
}
if (should_return_value) {
/* Add the data minus the CRLF */
#ifdef EXTSTORE
if (it->it_flags & ITEM_HDR) {
if (_get_extstore(c, it, c->resp) != 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_oom_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
failed = true;
}
} else if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(c->resp, ITEM_data(it), it->nbytes - 2);
} else {
// Allow transmit handler to find the item and expand iov's
resp_add_chunked_iov(c->resp, it, it->nbytes - 2);
}
#else
if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(c->resp, ITEM_data(it), it->nbytes - 2);
} else {
resp_add_chunked_iov(c->resp, it, it->nbytes - 2);
}
#endif
}
if (!failed) {
conn_set_state(c, conn_new_cmd);
/* Remember this command so we can garbage collect it later */
#ifdef EXTSTORE
if ((it->it_flags & ITEM_HDR) != 0 && should_return_value) {
// Only have extstore clean if header and returning value.
c->resp->item = NULL;
} else {
c->resp->item = it;
}
#else
c->resp->item = it;
#endif
} else {
item_remove(it);
}
} else {
failed = true;
}
if (failed) {
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
} else {
c->thread->stats.get_cmds++;
c->thread->stats.get_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
if (should_touch) {
MEMCACHED_COMMAND_TOUCH(c->sfd, key, nkey, -1, 0);
} else {
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
}
if (c->noreply) {
conn_set_state(c, conn_new_cmd);
} else {
if (should_return_key) {
write_bin_miss_response(c, key, nkey);
} else {
write_bin_miss_response(c, NULL, 0);
}
}
}
if (settings.detail_enabled) {
stats_prefix_record_get(key, nkey, NULL != it);
}
}
static void append_bin_stats(const char *key, const uint16_t klen,
const char *val, const uint32_t vlen,
conn *c) {
char *buf = c->stats.buffer + c->stats.offset;
uint32_t bodylen = klen + vlen;
protocol_binary_response_header header = {
.response.magic = (uint8_t)PROTOCOL_BINARY_RES,
.response.opcode = PROTOCOL_BINARY_CMD_STAT,
.response.keylen = (uint16_t)htons(klen),
.response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES,
.response.bodylen = htonl(bodylen),
.response.opaque = c->opaque
};
memcpy(buf, header.bytes, sizeof(header.response));
buf += sizeof(header.response);
if (klen > 0) {
memcpy(buf, key, klen);
buf += klen;
if (vlen > 0) {
memcpy(buf, val, vlen);
}
}
c->stats.offset += sizeof(header.response) + bodylen;
}
static void append_ascii_stats(const char *key, const uint16_t klen,
const char *val, const uint32_t vlen,
conn *c) {
char *pos = c->stats.buffer + c->stats.offset;
uint32_t nbytes = 0;
int remaining = c->stats.size - c->stats.offset;
int room = remaining - 1;
if (klen == 0 && vlen == 0) {
nbytes = snprintf(pos, room, "END\r\n");
} else if (vlen == 0) {
nbytes = snprintf(pos, room, "STAT %s\r\n", key);
} else {
nbytes = snprintf(pos, room, "STAT %s %s\r\n", key, val);
}
c->stats.offset += nbytes;
}
static bool grow_stats_buf(conn *c, size_t needed) {
size_t nsize = c->stats.size;
size_t available = nsize - c->stats.offset;
bool rv = true;
/* Special case: No buffer -- need to allocate fresh */
if (c->stats.buffer == NULL) {
nsize = 1024;
available = c->stats.size = c->stats.offset = 0;
}
while (needed > available) {
assert(nsize > 0);
nsize = nsize << 1;
available = nsize - c->stats.offset;
}
if (nsize != c->stats.size) {
char *ptr = realloc(c->stats.buffer, nsize);
if (ptr) {
c->stats.buffer = ptr;
c->stats.size = nsize;
} else {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
rv = false;
}
}
return rv;
}
static void append_stats(const char *key, const uint16_t klen,
const char *val, const uint32_t vlen,
const void *cookie)
{
/* value without a key is invalid */
if (klen == 0 && vlen > 0) {
return;
}
conn *c = (conn*)cookie;
if (c->protocol == binary_prot) {
size_t needed = vlen + klen + sizeof(protocol_binary_response_header);
if (!grow_stats_buf(c, needed)) {
return;
}
append_bin_stats(key, klen, val, vlen, c);
} else {
size_t needed = vlen + klen + 10; // 10 == "STAT = \r\n"
if (!grow_stats_buf(c, needed)) {
return;
}
append_ascii_stats(key, klen, val, vlen, c);
}
assert(c->stats.offset <= c->stats.size);
}
static void process_bin_stat(conn *c) {
char *subcommand = binary_get_key(c);
size_t nkey = c->binary_header.request.keylen;
if (settings.verbose > 1) {
int ii;
fprintf(stderr, "<%d STATS ", c->sfd);
for (ii = 0; ii < nkey; ++ii) {
fprintf(stderr, "%c", subcommand[ii]);
}
fprintf(stderr, "\n");
}
if (nkey == 0) {
/* request all statistics */
server_stats(&append_stats, c);
(void)get_stats(NULL, 0, &append_stats, c);
} else if (strncmp(subcommand, "reset", 5) == 0) {
stats_reset();
} else if (strncmp(subcommand, "settings", 8) == 0) {
process_stat_settings(&append_stats, c);
} else if (strncmp(subcommand, "detail", 6) == 0) {
char *subcmd_pos = subcommand + 6;
if (strncmp(subcmd_pos, " dump", 5) == 0) {
int len;
char *dump_buf = stats_prefix_dump(&len);
if (dump_buf == NULL || len <= 0) {
out_of_memory(c, "SERVER_ERROR Out of memory generating stats");
if (dump_buf != NULL)
free(dump_buf);
return;
} else {
append_stats("detailed", strlen("detailed"), dump_buf, len, c);
free(dump_buf);
}
} else if (strncmp(subcmd_pos, " on", 3) == 0) {
settings.detail_enabled = 1;
} else if (strncmp(subcmd_pos, " off", 4) == 0) {
settings.detail_enabled = 0;
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
return;
}
} else {
if (get_stats(subcommand, nkey, &append_stats, c)) {
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR Out of memory generating stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
}
return;
}
/* Append termination package and start the transfer */
append_stats(NULL, 0, NULL, 0, c);
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR Out of memory preparing to send stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
}
/* Just write an error message and disconnect the client */
static void handle_binary_protocol_error(conn *c) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, 0);
if (settings.verbose) {
fprintf(stderr, "Protocol error (opcode %02x), close connection %d\n",
c->binary_header.request.opcode, c->sfd);
}
c->close_after_write = true;
}
static void init_sasl_conn(conn *c) {
assert(c);
/* should something else be returned? */
if (!settings.sasl)
return;
c->authenticated = false;
if (!c->sasl_conn) {
int result=sasl_server_new("memcached",
NULL,
my_sasl_hostname[0] ? my_sasl_hostname : NULL,
NULL, NULL,
NULL, 0, &c->sasl_conn);
if (result != SASL_OK) {
if (settings.verbose) {
fprintf(stderr, "Failed to initialize SASL conn.\n");
}
c->sasl_conn = NULL;
}
}
}
static void bin_list_sasl_mechs(conn *c) {
// Guard against a disabled SASL.
if (!settings.sasl) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL,
c->binary_header.request.bodylen
- c->binary_header.request.keylen);
return;
}
init_sasl_conn(c);
const char *result_string = NULL;
unsigned int string_length = 0;
int result=sasl_listmech(c->sasl_conn, NULL,
"", /* What to prepend the string with */
" ", /* What to separate mechanisms with */
"", /* What to append to the string */
&result_string, &string_length,
NULL);
if (result != SASL_OK) {
/* Perhaps there's a better error for this... */
if (settings.verbose) {
fprintf(stderr, "Failed to list SASL mechanisms.\n");
}
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
return;
}
write_bin_response(c, (char*)result_string, 0, 0, string_length);
}
static void process_bin_sasl_auth(conn *c) {
// Guard for handling disabled SASL on the server.
if (!settings.sasl) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL,
c->binary_header.request.bodylen
- c->binary_header.request.keylen);
return;
}
assert(c->binary_header.request.extlen == 0);
int nkey = c->binary_header.request.keylen;
int vlen = c->binary_header.request.bodylen - nkey;
if (nkey > MAX_SASL_MECH_LEN) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, vlen);
conn_set_state(c, conn_swallow);
return;
}
char *key = binary_get_key(c);
assert(key);
item *it = item_alloc(key, nkey, 0, 0, vlen+2);
/* Can't use a chunked item for SASL authentication. */
if (it == 0 || (it->it_flags & ITEM_CHUNKED)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, NULL, vlen);
conn_set_state(c, conn_swallow);
if (it) {
do_item_remove(it);
}
return;
}
c->item = it;
c->ritem = ITEM_data(it);
c->rlbytes = vlen;
conn_set_state(c, conn_nread);
c->substate = bin_reading_sasl_auth_data;
}
static void process_bin_complete_sasl_auth(conn *c) {
assert(settings.sasl);
const char *out = NULL;
unsigned int outlen = 0;
assert(c->item);
init_sasl_conn(c);
int nkey = c->binary_header.request.keylen;
int vlen = c->binary_header.request.bodylen - nkey;
if (nkey > ((item*) c->item)->nkey) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, vlen);
conn_set_state(c, conn_swallow);
return;
}
char mech[nkey+1];
memcpy(mech, ITEM_key((item*)c->item), nkey);
mech[nkey] = 0x00;
if (settings.verbose)
fprintf(stderr, "mech: ``%s'' with %d bytes of data\n", mech, vlen);
const char *challenge = vlen == 0 ? NULL : ITEM_data((item*) c->item);
if (vlen > ((item*) c->item)->nbytes) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, vlen);
conn_set_state(c, conn_swallow);
return;
}
int result=-1;
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_SASL_AUTH:
result = sasl_server_start(c->sasl_conn, mech,
challenge, vlen,
&out, &outlen);
c->sasl_started = (result == SASL_OK || result == SASL_CONTINUE);
break;
case PROTOCOL_BINARY_CMD_SASL_STEP:
if (!c->sasl_started) {
if (settings.verbose) {
fprintf(stderr, "%d: SASL_STEP called but sasl_server_start "
"not called for this connection!\n", c->sfd);
}
break;
}
result = sasl_server_step(c->sasl_conn,
challenge, vlen,
&out, &outlen);
break;
default:
assert(false); /* CMD should be one of the above */
/* This code is pretty much impossible, but makes the compiler
happier */
if (settings.verbose) {
fprintf(stderr, "Unhandled command %d with challenge %s\n",
c->cmd, challenge);
}
break;
}
if (settings.verbose) {
fprintf(stderr, "sasl result code: %d\n", result);
}
switch(result) {
case SASL_OK:
c->authenticated = true;
write_bin_response(c, "Authenticated", 0, 0, strlen("Authenticated"));
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
break;
case SASL_CONTINUE:
add_bin_header(c, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE, 0, 0, outlen);
if (outlen > 0) {
resp_add_iov(c->resp, out, outlen);
}
// Immediately flush our write.
conn_set_state(c, conn_mwrite);
break;
default:
if (settings.verbose)
fprintf(stderr, "Unknown sasl response: %d\n", result);
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
c->thread->stats.auth_errors++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
}
static bool authenticated(conn *c) {
assert(settings.sasl);
bool rv = false;
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_SASL_AUTH: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_SASL_STEP: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_VERSION: /* FALLTHROUGH */
rv = true;
break;
default:
rv = c->authenticated;
}
if (settings.verbose > 1) {
fprintf(stderr, "authenticated() in cmd 0x%02x is %s\n",
c->cmd, rv ? "true" : "false");
}
return rv;
}
static void dispatch_bin_command(conn *c, char *extbuf) {
int protocol_error = 0;
uint8_t extlen = c->binary_header.request.extlen;
uint16_t keylen = c->binary_header.request.keylen;
uint32_t bodylen = c->binary_header.request.bodylen;
if (keylen > bodylen || keylen + extlen > bodylen) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL, 0);
c->close_after_write = true;
return;
}
if (settings.sasl && !authenticated(c)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
c->close_after_write = true;
return;
}
MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
c->noreply = true;
/* binprot supports 16bit keys, but internals are still 8bit */
if (keylen > KEY_MAX_LENGTH) {
handle_binary_protocol_error(c);
return;
}
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_SETQ:
c->cmd = PROTOCOL_BINARY_CMD_SET;
break;
case PROTOCOL_BINARY_CMD_ADDQ:
c->cmd = PROTOCOL_BINARY_CMD_ADD;
break;
case PROTOCOL_BINARY_CMD_REPLACEQ:
c->cmd = PROTOCOL_BINARY_CMD_REPLACE;
break;
case PROTOCOL_BINARY_CMD_DELETEQ:
c->cmd = PROTOCOL_BINARY_CMD_DELETE;
break;
case PROTOCOL_BINARY_CMD_INCREMENTQ:
c->cmd = PROTOCOL_BINARY_CMD_INCREMENT;
break;
case PROTOCOL_BINARY_CMD_DECREMENTQ:
c->cmd = PROTOCOL_BINARY_CMD_DECREMENT;
break;
case PROTOCOL_BINARY_CMD_QUITQ:
c->cmd = PROTOCOL_BINARY_CMD_QUIT;
break;
case PROTOCOL_BINARY_CMD_FLUSHQ:
c->cmd = PROTOCOL_BINARY_CMD_FLUSH;
break;
case PROTOCOL_BINARY_CMD_APPENDQ:
c->cmd = PROTOCOL_BINARY_CMD_APPEND;
break;
case PROTOCOL_BINARY_CMD_PREPENDQ:
c->cmd = PROTOCOL_BINARY_CMD_PREPEND;
break;
case PROTOCOL_BINARY_CMD_GETQ:
c->cmd = PROTOCOL_BINARY_CMD_GET;
break;
case PROTOCOL_BINARY_CMD_GETKQ:
c->cmd = PROTOCOL_BINARY_CMD_GETK;
break;
case PROTOCOL_BINARY_CMD_GATQ:
c->cmd = PROTOCOL_BINARY_CMD_GAT;
break;
case PROTOCOL_BINARY_CMD_GATKQ:
c->cmd = PROTOCOL_BINARY_CMD_GATK;
break;
default:
c->noreply = false;
}
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_VERSION:
if (extlen == 0 && keylen == 0 && bodylen == 0) {
write_bin_response(c, VERSION, 0, 0, strlen(VERSION));
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_FLUSH:
if (keylen == 0 && bodylen == extlen && (extlen == 0 || extlen == 4)) {
process_bin_flush(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_NOOP:
if (extlen == 0 && keylen == 0 && bodylen == 0) {
write_bin_response(c, NULL, 0, 0, 0);
// NOOP forces pipeline flush.
conn_set_state(c, conn_mwrite);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_SET: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_ADD: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_REPLACE:
if (extlen == 8 && keylen != 0 && bodylen >= (keylen + 8)) {
process_bin_update(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_GETQ: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_GET: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_GETKQ: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_GETK:
if (extlen == 0 && bodylen == keylen && keylen > 0) {
process_bin_get_or_touch(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_DELETE:
if (keylen > 0 && extlen == 0 && bodylen == keylen) {
process_bin_delete(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_INCREMENT:
case PROTOCOL_BINARY_CMD_DECREMENT:
if (keylen > 0 && extlen == 20 && bodylen == (keylen + extlen)) {
complete_incr_bin(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_APPEND:
case PROTOCOL_BINARY_CMD_PREPEND:
if (keylen > 0 && extlen == 0) {
process_bin_append_prepend(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_STAT:
if (extlen == 0) {
process_bin_stat(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_QUIT:
if (keylen == 0 && extlen == 0 && bodylen == 0) {
write_bin_response(c, NULL, 0, 0, 0);
conn_set_state(c, conn_mwrite);
c->close_after_write = true;
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS:
if (extlen == 0 && keylen == 0 && bodylen == 0) {
bin_list_sasl_mechs(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_SASL_AUTH:
case PROTOCOL_BINARY_CMD_SASL_STEP:
if (extlen == 0 && keylen != 0) {
process_bin_sasl_auth(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_TOUCH:
case PROTOCOL_BINARY_CMD_GAT:
case PROTOCOL_BINARY_CMD_GATQ:
case PROTOCOL_BINARY_CMD_GATK:
case PROTOCOL_BINARY_CMD_GATKQ:
if (extlen == 4 && keylen != 0) {
process_bin_get_or_touch(c, extbuf);
} else {
protocol_error = 1;
}
break;
default:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL,
bodylen);
}
if (protocol_error)
handle_binary_protocol_error(c);
}
static void process_bin_update(conn *c, char *extbuf) {
char *key;
int nkey;
int vlen;
item *it;
protocol_binary_request_set* req = (void *)extbuf;
assert(c != NULL);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
/* fix byteorder in the request */
req->message.body.flags = ntohl(req->message.body.flags);
req->message.body.expiration = ntohl(req->message.body.expiration);
vlen = c->binary_header.request.bodylen - (nkey + c->binary_header.request.extlen);
if (settings.verbose > 1) {
int ii;
if (c->cmd == PROTOCOL_BINARY_CMD_ADD) {
fprintf(stderr, "<%d ADD ", c->sfd);
} else if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
fprintf(stderr, "<%d SET ", c->sfd);
} else {
fprintf(stderr, "<%d REPLACE ", c->sfd);
}
for (ii = 0; ii < nkey; ++ii) {
fprintf(stderr, "%c", key[ii]);
}
fprintf(stderr, " Value len is %d", vlen);
fprintf(stderr, "\n");
}
if (settings.detail_enabled) {
stats_prefix_record_set(key, nkey);
}
it = item_alloc(key, nkey, req->message.body.flags,
realtime(req->message.body.expiration), vlen+2);
if (it == 0) {
enum store_item_type status;
if (! item_size_ok(nkey, req->message.body.flags, vlen + 2)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, NULL, vlen);
status = TOO_LARGE;
} else {
out_of_memory(c, "SERVER_ERROR Out of memory allocating item");
/* This error generating method eats the swallow value. Add here. */
c->sbytes = vlen;
status = NO_MEMORY;
}
/* FIXME: losing c->cmd since it's translated below. refactor? */
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
NULL, status, 0, key, nkey, req->message.body.expiration,
ITEM_clsid(it), c->sfd);
/* Avoid stale data persisting in cache because we failed alloc.
* Unacceptable for SET. Anywhere else too? */
if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
it = item_get(key, nkey, c, DONT_UPDATE);
if (it) {
item_unlink(it);
STORAGE_delete(c->thread->storage, it);
item_remove(it);
}
}
/* swallow the data line */
conn_set_state(c, conn_swallow);
return;
}
ITEM_set_cas(it, c->binary_header.request.cas);
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_ADD:
c->cmd = NREAD_ADD;
break;
case PROTOCOL_BINARY_CMD_SET:
c->cmd = NREAD_SET;
break;
case PROTOCOL_BINARY_CMD_REPLACE:
c->cmd = NREAD_REPLACE;
break;
default:
assert(0);
}
if (ITEM_get_cas(it) != 0) {
c->cmd = NREAD_CAS;
}
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = vlen;
conn_set_state(c, conn_nread);
c->substate = bin_read_set_value;
}
static void process_bin_append_prepend(conn *c) {
char *key;
int nkey;
int vlen;
item *it;
assert(c != NULL);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
vlen = c->binary_header.request.bodylen - nkey;
if (settings.verbose > 1) {
fprintf(stderr, "Value len is %d\n", vlen);
}
if (settings.detail_enabled) {
stats_prefix_record_set(key, nkey);
}
it = item_alloc(key, nkey, 0, 0, vlen+2);
if (it == 0) {
if (! item_size_ok(nkey, 0, vlen + 2)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, NULL, vlen);
} else {
out_of_memory(c, "SERVER_ERROR Out of memory allocating item");
/* OOM calls eat the swallow value. Add here. */
c->sbytes = vlen;
}
/* swallow the data line */
conn_set_state(c, conn_swallow);
return;
}
ITEM_set_cas(it, c->binary_header.request.cas);
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_APPEND:
c->cmd = NREAD_APPEND;
break;
case PROTOCOL_BINARY_CMD_PREPEND:
c->cmd = NREAD_PREPEND;
break;
default:
assert(0);
}
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = vlen;
conn_set_state(c, conn_nread);
c->substate = bin_read_set_value;
}
static void process_bin_flush(conn *c, char *extbuf) {
time_t exptime = 0;
protocol_binary_request_flush* req = (void *)extbuf;
rel_time_t new_oldest = 0;
if (!settings.flush_enabled) {
// flush_all is not allowed but we log it on stats
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
return;
}
if (c->binary_header.request.extlen == sizeof(req->message.body)) {
exptime = ntohl(req->message.body.expiration);
}
if (exptime > 0) {
new_oldest = realtime(exptime);
} else {
new_oldest = current_time;
}
if (settings.use_cas) {
settings.oldest_live = new_oldest - 1;
if (settings.oldest_live <= current_time)
settings.oldest_cas = get_cas_id();
} else {
settings.oldest_live = new_oldest;
}
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.flush_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
write_bin_response(c, NULL, 0, 0, 0);
}
static void process_bin_delete(conn *c) {
item *it;
uint32_t hv;
char* key = binary_get_key(c);
size_t nkey = c->binary_header.request.keylen;
assert(c != NULL);
if (settings.verbose > 1) {
int ii;
fprintf(stderr, "Deleting ");
for (ii = 0; ii < nkey; ++ii) {
fprintf(stderr, "%c", key[ii]);
}
fprintf(stderr, "\n");
}
if (settings.detail_enabled) {
stats_prefix_record_delete(key, nkey);
}
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
uint64_t cas = c->binary_header.request.cas;
if (cas == 0 || cas == ITEM_get_cas(it)) {
MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].delete_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
write_bin_response(c, NULL, 0, 0, 0);
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
}
do_item_remove(it); /* release our reference */
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
item_unlock(hv);
}
static void complete_nread_binary(conn *c) {
assert(c != NULL);
assert(c->cmd >= 0);
switch(c->substate) {
case bin_read_set_value:
complete_update_bin(c);
break;
case bin_reading_sasl_auth_data:
process_bin_complete_sasl_auth(c);
if (c->item) {
do_item_remove(c->item);
c->item = NULL;
}
break;
default:
fprintf(stderr, "Not handling substate %d\n", c->substate);
assert(0);
}
}
static void reset_cmd_handler(conn *c) {
c->cmd = -1;
c->substate = bin_no_state;
if (c->item != NULL) {
// TODO: Any other way to get here?
// SASL auth was mistakenly using it. Nothing else should?
item_remove(c->item);
c->item = NULL;
}
if (c->rbytes > 0) {
conn_set_state(c, conn_parse_cmd);
} else if (c->resp_head) {
conn_set_state(c, conn_mwrite);
} else {
conn_set_state(c, conn_waiting);
}
}
static void complete_nread(conn *c) {
assert(c != NULL);
assert(c->protocol == ascii_prot
|| c->protocol == binary_prot);
if (c->protocol == ascii_prot) {
complete_nread_ascii(c);
} else if (c->protocol == binary_prot) {
complete_nread_binary(c);
}
}
/* Destination must always be chunked */
/* This should be part of item.c */
static int _store_item_copy_chunks(item *d_it, item *s_it, const int len) {
item_chunk *dch = (item_chunk *) ITEM_schunk(d_it);
/* Advance dch until we find free space */
while (dch->size == dch->used) {
if (dch->next) {
dch = dch->next;
} else {
break;
}
}
if (s_it->it_flags & ITEM_CHUNKED) {
int remain = len;
item_chunk *sch = (item_chunk *) ITEM_schunk(s_it);
int copied = 0;
/* Fills dch's to capacity, not straight copy sch in case data is
* being added or removed (ie append/prepend)
*/
while (sch && dch && remain) {
assert(dch->used <= dch->size);
int todo = (dch->size - dch->used < sch->used - copied)
? dch->size - dch->used : sch->used - copied;
if (remain < todo)
todo = remain;
memcpy(dch->data + dch->used, sch->data + copied, todo);
dch->used += todo;
copied += todo;
remain -= todo;
assert(dch->used <= dch->size);
if (dch->size == dch->used) {
item_chunk *tch = do_item_alloc_chunk(dch, remain);
if (tch) {
dch = tch;
} else {
return -1;
}
}
assert(copied <= sch->used);
if (copied == sch->used) {
copied = 0;
sch = sch->next;
}
}
/* assert that the destination had enough space for the source */
assert(remain == 0);
} else {
int done = 0;
/* Fill dch's via a non-chunked item. */
while (len > done && dch) {
int todo = (dch->size - dch->used < len - done)
? dch->size - dch->used : len - done;
//assert(dch->size - dch->used != 0);
memcpy(dch->data + dch->used, ITEM_data(s_it) + done, todo);
done += todo;
dch->used += todo;
assert(dch->used <= dch->size);
if (dch->size == dch->used) {
item_chunk *tch = do_item_alloc_chunk(dch, len - done);
if (tch) {
dch = tch;
} else {
return -1;
}
}
}
assert(len == done);
}
return 0;
}
static int _store_item_copy_data(int comm, item *old_it, item *new_it, item *add_it) {
if (comm == NREAD_APPEND) {
if (new_it->it_flags & ITEM_CHUNKED) {
if (_store_item_copy_chunks(new_it, old_it, old_it->nbytes - 2) == -1 ||
_store_item_copy_chunks(new_it, add_it, add_it->nbytes) == -1) {
return -1;
}
} else {
memcpy(ITEM_data(new_it), ITEM_data(old_it), old_it->nbytes);
memcpy(ITEM_data(new_it) + old_it->nbytes - 2 /* CRLF */, ITEM_data(add_it), add_it->nbytes);
}
} else {
/* NREAD_PREPEND */
if (new_it->it_flags & ITEM_CHUNKED) {
if (_store_item_copy_chunks(new_it, add_it, add_it->nbytes - 2) == -1 ||
_store_item_copy_chunks(new_it, old_it, old_it->nbytes) == -1) {
return -1;
}
} else {
memcpy(ITEM_data(new_it), ITEM_data(add_it), add_it->nbytes);
memcpy(ITEM_data(new_it) + add_it->nbytes - 2 /* CRLF */, ITEM_data(old_it), old_it->nbytes);
}
}
return 0;
}
/*
* Stores an item in the cache according to the semantics of one of the set
* commands. Protected by the item lock.
*
* Returns the state of storage.
*/
enum store_item_type do_store_item(item *it, int comm, conn *c, const uint32_t hv) {
char *key = ITEM_key(it);
item *old_it = do_item_get(key, it->nkey, hv, c, DONT_UPDATE);
enum store_item_type stored = NOT_STORED;
enum cas_result { CAS_NONE, CAS_MATCH, CAS_BADVAL, CAS_STALE, CAS_MISS };
item *new_it = NULL;
uint32_t flags;
/* Do the CAS test up front so we can apply to all store modes */
enum cas_result cas_res = CAS_NONE;
bool do_store = false;
if (old_it != NULL) {
// Most of the CAS work requires something to compare to.
uint64_t it_cas = ITEM_get_cas(it);
uint64_t old_cas = ITEM_get_cas(old_it);
if (it_cas == 0) {
cas_res = CAS_NONE;
} else if (it_cas == old_cas) {
cas_res = CAS_MATCH;
} else if (c->set_stale && it_cas < old_cas) {
cas_res = CAS_STALE;
} else {
cas_res = CAS_BADVAL;
}
switch (comm) {
case NREAD_ADD:
/* add only adds a nonexistent item, but promote to head of LRU */
do_item_update(old_it);
break;
case NREAD_CAS:
if (cas_res == CAS_MATCH) {
// cas validates
// it and old_it may belong to different classes.
// I'm updating the stats for the one that's getting pushed out
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_store = true;
} else if (cas_res == CAS_STALE) {
// if we're allowed to set a stale value, CAS must be lower than
// the current item's CAS.
// This replaces the value, but should preserve TTL, and stale
// item marker bit + token sent if exists.
it->exptime = old_it->exptime;
it->it_flags |= ITEM_STALE;
if (old_it->it_flags & ITEM_TOKEN_SENT) {
it->it_flags |= ITEM_TOKEN_SENT;
}
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_store = true;
} else {
// NONE or BADVAL are the same for CAS cmd
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_badval++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (settings.verbose > 1) {
fprintf(stderr, "CAS: failure: expected %llu, got %llu\n",
(unsigned long long)ITEM_get_cas(old_it),
(unsigned long long)ITEM_get_cas(it));
}
stored = EXISTS;
}
break;
case NREAD_APPEND:
case NREAD_PREPEND:
if (cas_res != CAS_NONE && cas_res != CAS_MATCH) {
stored = EXISTS;
break;
}
#ifdef EXTSTORE
if ((old_it->it_flags & ITEM_HDR) != 0) {
/* block append/prepend from working with extstore-d items.
* leave response code to NOT_STORED default */
break;
}
#endif
/* we have it and old_it here - alloc memory to hold both */
FLAGS_CONV(old_it, flags);
new_it = do_item_alloc(key, it->nkey, flags, old_it->exptime, it->nbytes + old_it->nbytes - 2 /* CRLF */);
// OOM trying to copy.
if (new_it == NULL)
break;
/* copy data from it and old_it to new_it */
if (_store_item_copy_data(comm, old_it, new_it, it) == -1) {
// failed data copy
break;
} else {
// refcount of new_it is 1 here. will end up 2 after link.
// it's original ref is managed outside of this function
it = new_it;
do_store = true;
}
break;
case NREAD_REPLACE:
case NREAD_SET:
do_store = true;
break;
}
if (do_store) {
STORAGE_delete(c->thread->storage, old_it);
item_replace(old_it, it, hv);
stored = STORED;
}
do_item_remove(old_it); /* release our reference */
if (new_it != NULL) {
// append/prepend end up with an extra reference for new_it.
do_item_remove(new_it);
}
} else {
/* No pre-existing item to replace or compare to. */
if (ITEM_get_cas(it) != 0) {
/* Asked for a CAS match but nothing to compare it to. */
cas_res = CAS_MISS;
}
switch (comm) {
case NREAD_ADD:
case NREAD_SET:
do_store = true;
break;
case NREAD_CAS:
// LRU expired
stored = NOT_FOUND;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.cas_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
break;
case NREAD_REPLACE:
case NREAD_APPEND:
case NREAD_PREPEND:
/* Requires an existing item. */
break;
}
if (do_store) {
do_item_link(it, hv);
stored = STORED;
}
}
if (stored == STORED) {
c->cas = ITEM_get_cas(it);
}
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE, NULL,
stored, comm, ITEM_key(it), it->nkey, it->exptime, ITEM_clsid(it), c->sfd);
return stored;
}
typedef struct token_s {
char *value;
size_t length;
} token_t;
#define COMMAND_TOKEN 0
#define SUBCOMMAND_TOKEN 1
#define KEY_TOKEN 1
#define MAX_TOKENS 24
/*
* Tokenize the command string by replacing whitespace with '\0' and update
* the token array tokens with pointer to start of each token and length.
* Returns total number of tokens. The last valid token is the terminal
* token (value points to the first unprocessed character of the string and
* length zero).
*
* Usage example:
*
* while(tokenize_command(command, ncommand, tokens, max_tokens) > 0) {
* for(int ix = 0; tokens[ix].length != 0; ix++) {
* ...
* }
* ncommand = tokens[ix].value - command;
* command = tokens[ix].value;
* }
*/
static size_t tokenize_command(char *command, token_t *tokens, const size_t max_tokens) {
char *s, *e;
size_t ntokens = 0;
size_t len = strlen(command);
unsigned int i = 0;
assert(command != NULL && tokens != NULL && max_tokens > 1);
s = e = command;
for (i = 0; i < len; i++) {
if (*e == ' ') {
if (s != e) {
tokens[ntokens].value = s;
tokens[ntokens].length = e - s;
ntokens++;
*e = '\0';
if (ntokens == max_tokens - 1) {
e++;
s = e; /* so we don't add an extra token */
break;
}
}
s = e + 1;
}
e++;
}
if (s != e) {
tokens[ntokens].value = s;
tokens[ntokens].length = e - s;
ntokens++;
}
/*
* If we scanned the whole string, the terminal value pointer is null,
* otherwise it is the first unprocessed character.
*/
tokens[ntokens].value = *e == '\0' ? NULL : e;
tokens[ntokens].length = 0;
ntokens++;
return ntokens;
}
/* set up a connection to write a buffer then free it, used for stats */
static void write_and_free(conn *c, char *buf, int bytes) {
if (buf) {
mc_resp *resp = c->resp;
resp->write_and_free = buf;
resp_add_iov(resp, buf, bytes);
conn_set_state(c, conn_new_cmd);
} else {
out_of_memory(c, "SERVER_ERROR out of memory writing stats");
}
}
static inline bool set_noreply_maybe(conn *c, token_t *tokens, size_t ntokens)
{
int noreply_index = ntokens - 2;
/*
NOTE: this function is not the first place where we are going to
send the reply. We could send it instead from process_command()
if the request line has wrong number of tokens. However parsing
malformed line for "noreply" option is not reliable anyway, so
it can't be helped.
*/
if (tokens[noreply_index].value
&& strcmp(tokens[noreply_index].value, "noreply") == 0) {
c->noreply = true;
}
return c->noreply;
}
void append_stat(const char *name, ADD_STAT add_stats, conn *c,
const char *fmt, ...) {
char val_str[STAT_VAL_LEN];
int vlen;
va_list ap;
assert(name);
assert(add_stats);
assert(c);
assert(fmt);
va_start(ap, fmt);
vlen = vsnprintf(val_str, sizeof(val_str) - 1, fmt, ap);
va_end(ap);
add_stats(name, strlen(name), val_str, vlen, c);
}
inline static void process_stats_detail(conn *c, const char *command) {
assert(c != NULL);
if (strcmp(command, "on") == 0) {
settings.detail_enabled = 1;
out_string(c, "OK");
}
else if (strcmp(command, "off") == 0) {
settings.detail_enabled = 0;
out_string(c, "OK");
}
else if (strcmp(command, "dump") == 0) {
int len;
char *stats = stats_prefix_dump(&len);
write_and_free(c, stats, len);
}
else {
out_string(c, "CLIENT_ERROR usage: stats detail on|off|dump");
}
}
/* return server specific stats only */
static void server_stats(ADD_STAT add_stats, conn *c) {
pid_t pid = getpid();
rel_time_t now = current_time;
struct thread_stats thread_stats;
threadlocal_stats_aggregate(&thread_stats);
struct slab_stats slab_stats;
slab_stats_aggregate(&thread_stats, &slab_stats);
#ifdef EXTSTORE
struct extstore_stats st;
#endif
#ifndef WIN32
struct rusage usage;
getrusage(RUSAGE_SELF, &usage);
#endif /* !WIN32 */
STATS_LOCK();
APPEND_STAT("pid", "%lu", (long)pid);
APPEND_STAT("uptime", "%u", now - ITEM_UPDATE_INTERVAL);
APPEND_STAT("time", "%ld", now + (long)process_started);
APPEND_STAT("version", "%s", VERSION);
APPEND_STAT("libevent", "%s", event_get_version());
APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *)));
#ifndef WIN32
append_stat("rusage_user", add_stats, c, "%ld.%06ld",
(long)usage.ru_utime.tv_sec,
(long)usage.ru_utime.tv_usec);
append_stat("rusage_system", add_stats, c, "%ld.%06ld",
(long)usage.ru_stime.tv_sec,
(long)usage.ru_stime.tv_usec);
#endif /* !WIN32 */
APPEND_STAT("max_connections", "%d", settings.maxconns);
APPEND_STAT("curr_connections", "%llu", (unsigned long long)stats_state.curr_conns - 1);
APPEND_STAT("total_connections", "%llu", (unsigned long long)stats.total_conns);
if (settings.maxconns_fast) {
APPEND_STAT("rejected_connections", "%llu", (unsigned long long)stats.rejected_conns);
}
APPEND_STAT("connection_structures", "%u", stats_state.conn_structs);
APPEND_STAT("response_obj_bytes", "%llu", (unsigned long long)thread_stats.response_obj_bytes);
APPEND_STAT("response_obj_total", "%llu", (unsigned long long)thread_stats.response_obj_total);
APPEND_STAT("response_obj_free", "%llu", (unsigned long long)thread_stats.response_obj_free);
APPEND_STAT("response_obj_oom", "%llu", (unsigned long long)thread_stats.response_obj_oom);
APPEND_STAT("read_buf_bytes", "%llu", (unsigned long long)thread_stats.read_buf_bytes);
APPEND_STAT("read_buf_bytes_free", "%llu", (unsigned long long)thread_stats.read_buf_bytes_free);
APPEND_STAT("read_buf_oom", "%llu", (unsigned long long)thread_stats.read_buf_oom);
APPEND_STAT("reserved_fds", "%u", stats_state.reserved_fds);
APPEND_STAT("cmd_get", "%llu", (unsigned long long)thread_stats.get_cmds);
APPEND_STAT("cmd_set", "%llu", (unsigned long long)slab_stats.set_cmds);
APPEND_STAT("cmd_flush", "%llu", (unsigned long long)thread_stats.flush_cmds);
APPEND_STAT("cmd_touch", "%llu", (unsigned long long)thread_stats.touch_cmds);
APPEND_STAT("cmd_meta", "%llu", (unsigned long long)thread_stats.meta_cmds);
APPEND_STAT("get_hits", "%llu", (unsigned long long)slab_stats.get_hits);
APPEND_STAT("get_misses", "%llu", (unsigned long long)thread_stats.get_misses);
APPEND_STAT("get_expired", "%llu", (unsigned long long)thread_stats.get_expired);
APPEND_STAT("get_flushed", "%llu", (unsigned long long)thread_stats.get_flushed);
#ifdef EXTSTORE
if (c->thread->storage) {
APPEND_STAT("get_extstore", "%llu", (unsigned long long)thread_stats.get_extstore);
APPEND_STAT("get_aborted_extstore", "%llu", (unsigned long long)thread_stats.get_aborted_extstore);
APPEND_STAT("get_oom_extstore", "%llu", (unsigned long long)thread_stats.get_oom_extstore);
APPEND_STAT("recache_from_extstore", "%llu", (unsigned long long)thread_stats.recache_from_extstore);
APPEND_STAT("miss_from_extstore", "%llu", (unsigned long long)thread_stats.miss_from_extstore);
APPEND_STAT("badcrc_from_extstore", "%llu", (unsigned long long)thread_stats.badcrc_from_extstore);
}
#endif
APPEND_STAT("delete_misses", "%llu", (unsigned long long)thread_stats.delete_misses);
APPEND_STAT("delete_hits", "%llu", (unsigned long long)slab_stats.delete_hits);
APPEND_STAT("incr_misses", "%llu", (unsigned long long)thread_stats.incr_misses);
APPEND_STAT("incr_hits", "%llu", (unsigned long long)slab_stats.incr_hits);
APPEND_STAT("decr_misses", "%llu", (unsigned long long)thread_stats.decr_misses);
APPEND_STAT("decr_hits", "%llu", (unsigned long long)slab_stats.decr_hits);
APPEND_STAT("cas_misses", "%llu", (unsigned long long)thread_stats.cas_misses);
APPEND_STAT("cas_hits", "%llu", (unsigned long long)slab_stats.cas_hits);
APPEND_STAT("cas_badval", "%llu", (unsigned long long)slab_stats.cas_badval);
APPEND_STAT("touch_hits", "%llu", (unsigned long long)slab_stats.touch_hits);
APPEND_STAT("touch_misses", "%llu", (unsigned long long)thread_stats.touch_misses);
APPEND_STAT("auth_cmds", "%llu", (unsigned long long)thread_stats.auth_cmds);
APPEND_STAT("auth_errors", "%llu", (unsigned long long)thread_stats.auth_errors);
if (settings.idle_timeout) {
APPEND_STAT("idle_kicks", "%llu", (unsigned long long)thread_stats.idle_kicks);
}
APPEND_STAT("bytes_read", "%llu", (unsigned long long)thread_stats.bytes_read);
APPEND_STAT("bytes_written", "%llu", (unsigned long long)thread_stats.bytes_written);
APPEND_STAT("limit_maxbytes", "%llu", (unsigned long long)settings.maxbytes);
APPEND_STAT("accepting_conns", "%u", stats_state.accepting_conns);
APPEND_STAT("listen_disabled_num", "%llu", (unsigned long long)stats.listen_disabled_num);
APPEND_STAT("time_in_listen_disabled_us", "%llu", stats.time_in_listen_disabled_us);
APPEND_STAT("threads", "%d", settings.num_threads);
APPEND_STAT("conn_yields", "%llu", (unsigned long long)thread_stats.conn_yields);
APPEND_STAT("hash_power_level", "%u", stats_state.hash_power_level);
APPEND_STAT("hash_bytes", "%llu", (unsigned long long)stats_state.hash_bytes);
APPEND_STAT("hash_is_expanding", "%u", stats_state.hash_is_expanding);
if (settings.slab_reassign) {
APPEND_STAT("slab_reassign_rescues", "%llu", stats.slab_reassign_rescues);
APPEND_STAT("slab_reassign_chunk_rescues", "%llu", stats.slab_reassign_chunk_rescues);
APPEND_STAT("slab_reassign_evictions_nomem", "%llu", stats.slab_reassign_evictions_nomem);
APPEND_STAT("slab_reassign_inline_reclaim", "%llu", stats.slab_reassign_inline_reclaim);
APPEND_STAT("slab_reassign_busy_items", "%llu", stats.slab_reassign_busy_items);
APPEND_STAT("slab_reassign_busy_deletes", "%llu", stats.slab_reassign_busy_deletes);
APPEND_STAT("slab_reassign_running", "%u", stats_state.slab_reassign_running);
APPEND_STAT("slabs_moved", "%llu", stats.slabs_moved);
}
if (settings.lru_crawler) {
APPEND_STAT("lru_crawler_running", "%u", stats_state.lru_crawler_running);
APPEND_STAT("lru_crawler_starts", "%u", stats.lru_crawler_starts);
}
if (settings.lru_maintainer_thread) {
APPEND_STAT("lru_maintainer_juggles", "%llu", (unsigned long long)stats.lru_maintainer_juggles);
}
APPEND_STAT("malloc_fails", "%llu",
(unsigned long long)stats.malloc_fails);
APPEND_STAT("log_worker_dropped", "%llu", (unsigned long long)stats.log_worker_dropped);
APPEND_STAT("log_worker_written", "%llu", (unsigned long long)stats.log_worker_written);
APPEND_STAT("log_watcher_skipped", "%llu", (unsigned long long)stats.log_watcher_skipped);
APPEND_STAT("log_watcher_sent", "%llu", (unsigned long long)stats.log_watcher_sent);
STATS_UNLOCK();
#ifdef EXTSTORE
if (c->thread->storage) {
STATS_LOCK();
APPEND_STAT("extstore_compact_lost", "%llu", (unsigned long long)stats.extstore_compact_lost);
APPEND_STAT("extstore_compact_rescues", "%llu", (unsigned long long)stats.extstore_compact_rescues);
APPEND_STAT("extstore_compact_skipped", "%llu", (unsigned long long)stats.extstore_compact_skipped);
STATS_UNLOCK();
extstore_get_stats(c->thread->storage, &st);
APPEND_STAT("extstore_page_allocs", "%llu", (unsigned long long)st.page_allocs);
APPEND_STAT("extstore_page_evictions", "%llu", (unsigned long long)st.page_evictions);
APPEND_STAT("extstore_page_reclaims", "%llu", (unsigned long long)st.page_reclaims);
APPEND_STAT("extstore_pages_free", "%llu", (unsigned long long)st.pages_free);
APPEND_STAT("extstore_pages_used", "%llu", (unsigned long long)st.pages_used);
APPEND_STAT("extstore_objects_evicted", "%llu", (unsigned long long)st.objects_evicted);
APPEND_STAT("extstore_objects_read", "%llu", (unsigned long long)st.objects_read);
APPEND_STAT("extstore_objects_written", "%llu", (unsigned long long)st.objects_written);
APPEND_STAT("extstore_objects_used", "%llu", (unsigned long long)st.objects_used);
APPEND_STAT("extstore_bytes_evicted", "%llu", (unsigned long long)st.bytes_evicted);
APPEND_STAT("extstore_bytes_written", "%llu", (unsigned long long)st.bytes_written);
APPEND_STAT("extstore_bytes_read", "%llu", (unsigned long long)st.bytes_read);
APPEND_STAT("extstore_bytes_used", "%llu", (unsigned long long)st.bytes_used);
APPEND_STAT("extstore_bytes_fragmented", "%llu", (unsigned long long)st.bytes_fragmented);
APPEND_STAT("extstore_limit_maxbytes", "%llu", (unsigned long long)(st.page_count * st.page_size));
APPEND_STAT("extstore_io_queue", "%llu", (unsigned long long)(st.io_queue));
}
#endif
#ifdef TLS
if (settings.ssl_enabled) {
APPEND_STAT("ssl_handshake_errors", "%llu", (unsigned long long)stats.ssl_handshake_errors);
APPEND_STAT("time_since_server_cert_refresh", "%u", now - settings.ssl_last_cert_refresh_time);
}
#endif
}
static void process_stat_settings(ADD_STAT add_stats, void *c) {
assert(add_stats);
APPEND_STAT("maxbytes", "%llu", (unsigned long long)settings.maxbytes);
APPEND_STAT("maxconns", "%d", settings.maxconns);
APPEND_STAT("tcpport", "%d", settings.port);
APPEND_STAT("udpport", "%d", settings.udpport);
APPEND_STAT("inter", "%s", settings.inter ? settings.inter : "NULL");
APPEND_STAT("verbosity", "%d", settings.verbose);
APPEND_STAT("oldest", "%lu", (unsigned long)settings.oldest_live);
APPEND_STAT("evictions", "%s", settings.evict_to_free ? "on" : "off");
APPEND_STAT("domain_socket", "%s",
settings.socketpath ? settings.socketpath : "NULL");
APPEND_STAT("umask", "%o", settings.access);
APPEND_STAT("growth_factor", "%.2f", settings.factor);
APPEND_STAT("chunk_size", "%d", settings.chunk_size);
APPEND_STAT("num_threads", "%d", settings.num_threads);
APPEND_STAT("num_threads_per_udp", "%d", settings.num_threads_per_udp);
APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter);
APPEND_STAT("detail_enabled", "%s",
settings.detail_enabled ? "yes" : "no");
APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event);
APPEND_STAT("cas_enabled", "%s", settings.use_cas ? "yes" : "no");
APPEND_STAT("tcp_backlog", "%d", settings.backlog);
APPEND_STAT("binding_protocol", "%s",
prot_text(settings.binding_protocol));
APPEND_STAT("auth_enabled_sasl", "%s", settings.sasl ? "yes" : "no");
APPEND_STAT("auth_enabled_ascii", "%s", settings.auth_file ? settings.auth_file : "no");
APPEND_STAT("item_size_max", "%d", settings.item_size_max);
APPEND_STAT("maxconns_fast", "%s", settings.maxconns_fast ? "yes" : "no");
APPEND_STAT("hashpower_init", "%d", settings.hashpower_init);
APPEND_STAT("slab_reassign", "%s", settings.slab_reassign ? "yes" : "no");
APPEND_STAT("slab_automove", "%d", settings.slab_automove);
APPEND_STAT("slab_automove_ratio", "%.2f", settings.slab_automove_ratio);
APPEND_STAT("slab_automove_window", "%u", settings.slab_automove_window);
APPEND_STAT("slab_chunk_max", "%d", settings.slab_chunk_size_max);
APPEND_STAT("lru_crawler", "%s", settings.lru_crawler ? "yes" : "no");
APPEND_STAT("lru_crawler_sleep", "%d", settings.lru_crawler_sleep);
APPEND_STAT("lru_crawler_tocrawl", "%lu", (unsigned long)settings.lru_crawler_tocrawl);
APPEND_STAT("tail_repair_time", "%d", settings.tail_repair_time);
APPEND_STAT("flush_enabled", "%s", settings.flush_enabled ? "yes" : "no");
APPEND_STAT("dump_enabled", "%s", settings.dump_enabled ? "yes" : "no");
APPEND_STAT("hash_algorithm", "%s", settings.hash_algorithm);
APPEND_STAT("lru_maintainer_thread", "%s", settings.lru_maintainer_thread ? "yes" : "no");
APPEND_STAT("lru_segmented", "%s", settings.lru_segmented ? "yes" : "no");
APPEND_STAT("hot_lru_pct", "%d", settings.hot_lru_pct);
APPEND_STAT("warm_lru_pct", "%d", settings.warm_lru_pct);
APPEND_STAT("hot_max_factor", "%.2f", settings.hot_max_factor);
APPEND_STAT("warm_max_factor", "%.2f", settings.warm_max_factor);
APPEND_STAT("temp_lru", "%s", settings.temp_lru ? "yes" : "no");
APPEND_STAT("temporary_ttl", "%u", settings.temporary_ttl);
APPEND_STAT("idle_timeout", "%d", settings.idle_timeout);
APPEND_STAT("watcher_logbuf_size", "%u", settings.logger_watcher_buf_size);
APPEND_STAT("worker_logbuf_size", "%u", settings.logger_buf_size);
APPEND_STAT("resp_obj_mem_limit", "%u", settings.resp_obj_mem_limit);
APPEND_STAT("read_buf_mem_limit", "%u", settings.read_buf_mem_limit);
APPEND_STAT("track_sizes", "%s", item_stats_sizes_status() ? "yes" : "no");
APPEND_STAT("inline_ascii_response", "%s", "no"); // setting is dead, cannot be yes.
#ifdef HAVE_DROP_PRIVILEGES
APPEND_STAT("drop_privileges", "%s", settings.drop_privileges ? "yes" : "no");
#endif
#ifdef EXTSTORE
APPEND_STAT("ext_item_size", "%u", settings.ext_item_size);
APPEND_STAT("ext_item_age", "%u", settings.ext_item_age);
APPEND_STAT("ext_low_ttl", "%u", settings.ext_low_ttl);
APPEND_STAT("ext_recache_rate", "%u", settings.ext_recache_rate);
APPEND_STAT("ext_wbuf_size", "%u", settings.ext_wbuf_size);
APPEND_STAT("ext_compact_under", "%u", settings.ext_compact_under);
APPEND_STAT("ext_drop_under", "%u", settings.ext_drop_under);
APPEND_STAT("ext_max_frag", "%.2f", settings.ext_max_frag);
APPEND_STAT("slab_automove_freeratio", "%.3f", settings.slab_automove_freeratio);
APPEND_STAT("ext_drop_unread", "%s", settings.ext_drop_unread ? "yes" : "no");
#endif
#ifdef TLS
APPEND_STAT("ssl_enabled", "%s", settings.ssl_enabled ? "yes" : "no");
APPEND_STAT("ssl_chain_cert", "%s", settings.ssl_chain_cert);
APPEND_STAT("ssl_key", "%s", settings.ssl_key);
APPEND_STAT("ssl_verify_mode", "%d", settings.ssl_verify_mode);
APPEND_STAT("ssl_keyformat", "%d", settings.ssl_keyformat);
APPEND_STAT("ssl_ciphers", "%s", settings.ssl_ciphers ? settings.ssl_ciphers : "NULL");
APPEND_STAT("ssl_ca_cert", "%s", settings.ssl_ca_cert ? settings.ssl_ca_cert : "NULL");
APPEND_STAT("ssl_wbuf_size", "%u", settings.ssl_wbuf_size);
#endif
}
static int nz_strcmp(int nzlength, const char *nz, const char *z) {
int zlength=strlen(z);
return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
}
static bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
bool ret = true;
if (add_stats != NULL) {
if (!stat_type) {
/* prepare general statistics for the engine */
STATS_LOCK();
APPEND_STAT("bytes", "%llu", (unsigned long long)stats_state.curr_bytes);
APPEND_STAT("curr_items", "%llu", (unsigned long long)stats_state.curr_items);
APPEND_STAT("total_items", "%llu", (unsigned long long)stats.total_items);
STATS_UNLOCK();
APPEND_STAT("slab_global_page_pool", "%u", global_page_pool_size(NULL));
item_stats_totals(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "items") == 0) {
item_stats(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
slabs_stats(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
item_stats_sizes(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "sizes_enable") == 0) {
item_stats_sizes_enable(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "sizes_disable") == 0) {
item_stats_sizes_disable(add_stats, c);
} else {
ret = false;
}
} else {
ret = false;
}
return ret;
}
static inline void get_conn_text(const conn *c, const int af,
char* addr, struct sockaddr *sock_addr) {
char addr_text[MAXPATHLEN];
addr_text[0] = '\0';
const char *protoname = "?";
unsigned short port = 0;
size_t pathlen = 0;
switch (af) {
case AF_INET:
(void) inet_ntop(af,
&((struct sockaddr_in *)sock_addr)->sin_addr,
addr_text,
sizeof(addr_text) - 1);
port = ntohs(((struct sockaddr_in *)sock_addr)->sin_port);
protoname = IS_UDP(c->transport) ? "udp" : "tcp";
break;
case AF_INET6:
addr_text[0] = '[';
addr_text[1] = '\0';
if (inet_ntop(af,
&((struct sockaddr_in6 *)sock_addr)->sin6_addr,
addr_text + 1,
sizeof(addr_text) - 2)) {
strcat(addr_text, "]");
}
port = ntohs(((struct sockaddr_in6 *)sock_addr)->sin6_port);
protoname = IS_UDP(c->transport) ? "udp6" : "tcp6";
break;
case AF_UNIX:
// this strncpy call originally could piss off an address
// sanitizer; we supplied the size of the dest buf as a limiter,
// but optimized versions of strncpy could read past the end of
// *src while looking for a null terminator. Since buf and
// sun_path here are both on the stack they could even overlap,
// which is "undefined". In all OSS versions of strncpy I could
// find this has no effect; it'll still only copy until the first null
// terminator is found. Thus it's possible to get the OS to
// examine past the end of sun_path but it's unclear to me if this
// can cause any actual problem.
//
// We need a safe_strncpy util function but I'll punt on figuring
// that out for now.
pathlen = sizeof(((struct sockaddr_un *)sock_addr)->sun_path);
if (MAXPATHLEN <= pathlen) {
pathlen = MAXPATHLEN - 1;
}
strncpy(addr_text,
((struct sockaddr_un *)sock_addr)->sun_path,
pathlen);
addr_text[pathlen] = '\0';
protoname = "unix";
break;
}
if (strlen(addr_text) < 2) {
/* Most likely this is a connected UNIX-domain client which
* has no peer socket address, but there's no portable way
* to tell for sure.
*/
sprintf(addr_text, "<AF %d>", af);
}
if (port) {
sprintf(addr, "%s:%s:%u", protoname, addr_text, port);
} else {
sprintf(addr, "%s:%s", protoname, addr_text);
}
}
static void conn_to_str(const conn *c, char *addr, char *svr_addr) {
if (!c) {
strcpy(addr, "<null>");
} else if (c->state == conn_closed) {
strcpy(addr, "<closed>");
} else {
struct sockaddr_in6 local_addr;
struct sockaddr *sock_addr = (void *)&c->request_addr;
/* For listen ports and idle UDP ports, show listen address */
if (c->state == conn_listening ||
(IS_UDP(c->transport) &&
c->state == conn_read)) {
socklen_t local_addr_len = sizeof(local_addr);
if (getsockname(c->sfd,
(struct sockaddr *)&local_addr,
&local_addr_len) == 0) {
sock_addr = (struct sockaddr *)&local_addr;
}
}
get_conn_text(c, sock_addr->sa_family, addr, sock_addr);
if (c->state != conn_listening && !(IS_UDP(c->transport) &&
c->state == conn_read)) {
struct sockaddr_storage svr_sock_addr;
socklen_t svr_addr_len = sizeof(svr_sock_addr);
getsockname(c->sfd, (struct sockaddr *)&svr_sock_addr, &svr_addr_len);
get_conn_text(c, svr_sock_addr.ss_family, svr_addr, (struct sockaddr *)&svr_sock_addr);
}
}
}
static void process_stats_conns(ADD_STAT add_stats, void *c) {
int i;
char key_str[STAT_KEY_LEN];
char val_str[STAT_VAL_LEN];
size_t extras_len = sizeof("unix:") + sizeof("65535");
char addr[MAXPATHLEN + extras_len];
char svr_addr[MAXPATHLEN + extras_len];
int klen = 0, vlen = 0;
assert(add_stats);
for (i = 0; i < max_fds; i++) {
if (conns[i]) {
/* This is safe to do unlocked because conns are never freed; the
* worst that'll happen will be a minor inconsistency in the
* output -- not worth the complexity of the locking that'd be
* required to prevent it.
*/
if (IS_UDP(conns[i]->transport)) {
APPEND_NUM_STAT(i, "UDP", "%s", "UDP");
}
if (conns[i]->state != conn_closed) {
conn_to_str(conns[i], addr, svr_addr);
APPEND_NUM_STAT(i, "addr", "%s", addr);
if (conns[i]->state != conn_listening &&
!(IS_UDP(conns[i]->transport) && conns[i]->state == conn_read)) {
APPEND_NUM_STAT(i, "listen_addr", "%s", svr_addr);
}
APPEND_NUM_STAT(i, "state", "%s",
state_text(conns[i]->state));
APPEND_NUM_STAT(i, "secs_since_last_cmd", "%d",
current_time - conns[i]->last_cmd_time);
}
}
}
}
#ifdef EXTSTORE
static void process_extstore_stats(ADD_STAT add_stats, conn *c) {
int i;
char key_str[STAT_KEY_LEN];
char val_str[STAT_VAL_LEN];
int klen = 0, vlen = 0;
struct extstore_stats st;
assert(add_stats);
void *storage = c->thread->storage;
extstore_get_stats(storage, &st);
st.page_data = calloc(st.page_count, sizeof(struct extstore_page_data));
extstore_get_page_data(storage, &st);
for (i = 0; i < st.page_count; i++) {
APPEND_NUM_STAT(i, "version", "%llu",
(unsigned long long) st.page_data[i].version);
APPEND_NUM_STAT(i, "bytes", "%llu",
(unsigned long long) st.page_data[i].bytes_used);
APPEND_NUM_STAT(i, "bucket", "%u",
st.page_data[i].bucket);
APPEND_NUM_STAT(i, "free_bucket", "%u",
st.page_data[i].free_bucket);
}
}
#endif
static void process_stat(conn *c, token_t *tokens, const size_t ntokens) {
const char *subcommand = tokens[SUBCOMMAND_TOKEN].value;
assert(c != NULL);
if (ntokens < 2) {
out_string(c, "CLIENT_ERROR bad command line");
return;
}
if (ntokens == 2) {
server_stats(&append_stats, c);
(void)get_stats(NULL, 0, &append_stats, c);
} else if (strcmp(subcommand, "reset") == 0) {
stats_reset();
out_string(c, "RESET");
return;
} else if (strcmp(subcommand, "detail") == 0) {
/* NOTE: how to tackle detail with binary? */
if (ntokens < 4)
process_stats_detail(c, ""); /* outputs the error message */
else
process_stats_detail(c, tokens[2].value);
/* Output already generated */
return;
} else if (strcmp(subcommand, "settings") == 0) {
process_stat_settings(&append_stats, c);
} else if (strcmp(subcommand, "cachedump") == 0) {
char *buf;
unsigned int bytes, id, limit = 0;
if (!settings.dump_enabled) {
out_string(c, "CLIENT_ERROR stats cachedump not allowed");
return;
}
if (ntokens < 5) {
out_string(c, "CLIENT_ERROR bad command line");
return;
}
if (!safe_strtoul(tokens[2].value, &id) ||
!safe_strtoul(tokens[3].value, &limit)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
if (id >= MAX_NUMBER_OF_SLAB_CLASSES) {
out_string(c, "CLIENT_ERROR Illegal slab id");
return;
}
buf = item_cachedump(id, limit, &bytes);
write_and_free(c, buf, bytes);
return;
} else if (strcmp(subcommand, "conns") == 0) {
process_stats_conns(&append_stats, c);
#ifdef EXTSTORE
} else if (strcmp(subcommand, "extstore") == 0) {
process_extstore_stats(&append_stats, c);
#endif
} else {
/* getting here means that the subcommand is either engine specific or
is invalid. query the engine and see. */
if (get_stats(subcommand, strlen(subcommand), &append_stats, c)) {
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR out of memory writing stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
} else {
out_string(c, "ERROR");
}
return;
}
/* append terminator and start the transfer */
append_stats(NULL, 0, NULL, 0, c);
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR out of memory writing stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
}
/* client flags == 0 means use no storage for client flags */
static inline int make_ascii_get_suffix(char *suffix, item *it, bool return_cas, int nbytes) {
char *p = suffix;
*p = ' ';
p++;
if (FLAGS_SIZE(it) == 0) {
*p = '0';
p++;
} else {
p = itoa_u32(*((uint32_t *) ITEM_suffix(it)), p);
}
*p = ' ';
p = itoa_u32(nbytes-2, p+1);
if (return_cas) {
*p = ' ';
p = itoa_u64(ITEM_get_cas(it), p+1);
}
*p = '\r';
*(p+1) = '\n';
*(p+2) = '\0';
return (p - suffix) + 2;
}
#define IT_REFCOUNT_LIMIT 60000
static inline item* limited_get(char *key, size_t nkey, conn *c, uint32_t exptime, bool should_touch, bool do_update, bool *overflow) {
item *it;
if (should_touch) {
it = item_touch(key, nkey, exptime, c);
} else {
it = item_get(key, nkey, c, do_update);
}
if (it && it->refcount > IT_REFCOUNT_LIMIT) {
item_remove(it);
it = NULL;
*overflow = true;
} else {
*overflow = false;
}
return it;
}
// Semantics are different than limited_get; since the item is returned
// locked, caller can directly change what it needs.
// though it might eventually be a better interface to sink it all into
// items.c.
static inline item* limited_get_locked(char *key, size_t nkey, conn *c, bool do_update, uint32_t *hv, bool *overflow) {
item *it;
it = item_get_locked(key, nkey, c, do_update, hv);
if (it && it->refcount > IT_REFCOUNT_LIMIT) {
do_item_remove(it);
it = NULL;
item_unlock(*hv);
*overflow = true;
} else {
*overflow = false;
}
return it;
}
#ifdef EXTSTORE
// FIXME: This runs in the IO thread. to get better IO performance this should
// simply mark the io wrapper with the return value and decrement wrapleft, if
// zero redispatching. Still a bit of work being done in the side thread but
// minimized at least.
static void _get_extstore_cb(void *e, obj_io *io, int ret) {
// FIXME: assumes success
io_wrap *wrap = (io_wrap *)io->data;
mc_resp *resp = wrap->resp;
conn *c = wrap->c;
assert(wrap->active == true);
item *read_it = (item *)io->buf;
bool miss = false;
// TODO: How to do counters for hit/misses?
if (ret < 1) {
miss = true;
} else {
uint32_t crc2;
uint32_t crc = (uint32_t) read_it->exptime;
int x;
// item is chunked, crc the iov's
if (io->iov != NULL) {
// first iov is the header, which we don't use beyond crc
crc2 = crc32c(0, (char *)io->iov[0].iov_base+STORE_OFFSET, io->iov[0].iov_len-STORE_OFFSET);
// make sure it's not sent. hack :(
io->iov[0].iov_len = 0;
for (x = 1; x < io->iovcnt; x++) {
crc2 = crc32c(crc2, (char *)io->iov[x].iov_base, io->iov[x].iov_len);
}
} else {
crc2 = crc32c(0, (char *)read_it+STORE_OFFSET, io->len-STORE_OFFSET);
}
if (crc != crc2) {
miss = true;
wrap->badcrc = true;
}
}
if (miss) {
if (wrap->noreply) {
// In all GET cases, noreply means we send nothing back.
resp->skip = true;
} else {
// TODO: This should be movable to the worker thread.
// Convert the binprot response into a miss response.
// The header requires knowing a bunch of stateful crap, so rather
// than simply writing out a "new" miss response we mangle what's
// already there.
if (c->protocol == binary_prot) {
protocol_binary_response_header *header =
(protocol_binary_response_header *)resp->wbuf;
// cut the extra nbytes off of the body_len
uint32_t body_len = ntohl(header->response.bodylen);
uint8_t hdr_len = header->response.extlen;
body_len -= resp->iov[wrap->iovec_data].iov_len + hdr_len;
resp->tosend -= resp->iov[wrap->iovec_data].iov_len + hdr_len;
header->response.extlen = 0;
header->response.status = (uint16_t)htons(PROTOCOL_BINARY_RESPONSE_KEY_ENOENT);
header->response.bodylen = htonl(body_len);
// truncate the data response.
resp->iov[wrap->iovec_data].iov_len = 0;
// wipe the extlen iov... wish it was just a flat buffer.
resp->iov[wrap->iovec_data-1].iov_len = 0;
resp->chunked_data_iov = 0;
} else {
int i;
// Meta commands have EN status lines for miss, rather than
// END as a trailer as per normal ascii.
if (resp->iov[0].iov_len >= 3
&& memcmp(resp->iov[0].iov_base, "VA ", 3) == 0) {
// TODO: These miss translators should use specific callback
// functions attached to the io wrap. This is weird :(
resp->iovcnt = 1;
resp->iov[0].iov_len = 4;
resp->iov[0].iov_base = "EN\r\n";
resp->tosend = 4;
} else {
// Wipe the iovecs up through our data injection.
// Allows trailers to be returned (END)
for (i = 0; i <= wrap->iovec_data; i++) {
resp->tosend -= resp->iov[i].iov_len;
resp->iov[i].iov_len = 0;
resp->iov[i].iov_base = NULL;
}
}
resp->chunked_total = 0;
resp->chunked_data_iov = 0;
}
}
wrap->miss = true;
} else {
assert(read_it->slabs_clsid != 0);
// TODO: should always use it instead of ITEM_data to kill more
// chunked special casing.
if ((read_it->it_flags & ITEM_CHUNKED) == 0) {
resp->iov[wrap->iovec_data].iov_base = ITEM_data(read_it);
}
wrap->miss = false;
}
c->io_wrapleft--;
wrap->active = false;
//assert(c->io_wrapleft >= 0);
// All IO's have returned, lets re-attach this connection to our original
// thread.
if (c->io_wrapleft == 0) {
assert(c->io_queued == true);
c->io_queued = false;
redispatch_conn(c);
}
}
static inline int _get_extstore(conn *c, item *it, mc_resp *resp) {
#ifdef NEED_ALIGN
item_hdr hdr;
memcpy(&hdr, ITEM_data(it), sizeof(hdr));
#else
item_hdr *hdr = (item_hdr *)ITEM_data(it);
#endif
size_t ntotal = ITEM_ntotal(it);
unsigned int clsid = slabs_clsid(ntotal);
item *new_it;
bool chunked = false;
if (ntotal > settings.slab_chunk_size_max) {
// Pull a chunked item header.
uint32_t flags;
FLAGS_CONV(it, flags);
new_it = item_alloc(ITEM_key(it), it->nkey, flags, it->exptime, it->nbytes);
assert(new_it == NULL || (new_it->it_flags & ITEM_CHUNKED));
chunked = true;
} else {
new_it = do_item_alloc_pull(ntotal, clsid);
}
if (new_it == NULL)
return -1;
assert(!c->io_queued); // FIXME: debugging.
// so we can free the chunk on a miss
new_it->slabs_clsid = clsid;
io_wrap *io = do_cache_alloc(c->thread->io_cache);
io->active = true;
io->miss = false;
io->badcrc = false;
io->noreply = c->noreply;
// io_wrap owns the reference for this object now.
io->hdr_it = it;
io->resp = resp;
io->io.iov = NULL;
// FIXME: error handling.
if (chunked) {
unsigned int ciovcnt = 0;
size_t remain = new_it->nbytes;
item_chunk *chunk = (item_chunk *) ITEM_schunk(new_it);
// TODO: This might make sense as a _global_ cache vs a per-thread.
// but we still can't load objects requiring > IOV_MAX iovs.
// In the meantime, these objects are rare/slow enough that
// malloc/freeing a statically sized object won't cause us much pain.
io->io.iov = malloc(sizeof(struct iovec) * IOV_MAX);
if (io->io.iov == NULL) {
item_remove(new_it);
do_cache_free(c->thread->io_cache, io);
return -1;
}
// fill the header so we can get the full data + crc back.
io->io.iov[0].iov_base = new_it;
io->io.iov[0].iov_len = ITEM_ntotal(new_it) - new_it->nbytes;
ciovcnt++;
while (remain > 0) {
chunk = do_item_alloc_chunk(chunk, remain);
// FIXME: _pure evil_, silently erroring if item is too large.
if (chunk == NULL || ciovcnt > IOV_MAX-1) {
item_remove(new_it);
free(io->io.iov);
// TODO: wrapper function for freeing up an io wrap?
io->io.iov = NULL;
do_cache_free(c->thread->io_cache, io);
return -1;
}
io->io.iov[ciovcnt].iov_base = chunk->data;
io->io.iov[ciovcnt].iov_len = (remain < chunk->size) ? remain : chunk->size;
chunk->used = (remain < chunk->size) ? remain : chunk->size;
remain -= chunk->size;
ciovcnt++;
}
io->io.iovcnt = ciovcnt;
}
// Chunked or non chunked we reserve a response iov here.
io->iovec_data = resp->iovcnt;
int iovtotal = (c->protocol == binary_prot) ? it->nbytes - 2 : it->nbytes;
if (chunked) {
resp_add_chunked_iov(resp, new_it, iovtotal);
} else {
resp_add_iov(resp, "", iovtotal);
}
io->io.buf = (void *)new_it;
io->c = c;
// We need to stack the sub-struct IO's together as well.
if (c->io_wraplist) {
io->io.next = &c->io_wraplist->io;
} else {
io->io.next = NULL;
}
// IO queue for this connection.
io->next = c->io_wraplist;
c->io_wraplist = io;
assert(c->io_wrapleft >= 0);
c->io_wrapleft++;
// reference ourselves for the callback.
io->io.data = (void *)io;
// Now, fill in io->io based on what was in our header.
#ifdef NEED_ALIGN
io->io.page_version = hdr.page_version;
io->io.page_id = hdr.page_id;
io->io.offset = hdr.offset;
#else
io->io.page_version = hdr->page_version;
io->io.page_id = hdr->page_id;
io->io.offset = hdr->offset;
#endif
io->io.len = ntotal;
io->io.mode = OBJ_IO_READ;
io->io.cb = _get_extstore_cb;
//fprintf(stderr, "EXTSTORE: IO stacked %u\n", io->iovec_data);
// FIXME: This stat needs to move to reflect # of flash hits vs misses
// for now it's a good gauge on how often we request out to flash at
// least.
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
return 0;
}
#endif
/* ntokens is overwritten here... shrug.. */
static inline void process_get_command(conn *c, token_t *tokens, size_t ntokens, bool return_cas, bool should_touch) {
char *key;
size_t nkey;
item *it;
token_t *key_token = &tokens[KEY_TOKEN];
int32_t exptime_int = 0;
rel_time_t exptime = 0;
bool fail_length = false;
assert(c != NULL);
mc_resp *resp = c->resp;
if (should_touch) {
// For get and touch commands, use first token as exptime
if (!safe_strtol(tokens[1].value, &exptime_int)) {
out_string(c, "CLIENT_ERROR invalid exptime argument");
return;
}
key_token++;
exptime = realtime(EXPTIME_TO_POSITIVE_TIME(exptime_int));
}
do {
while(key_token->length != 0) {
bool overflow; // not used here.
key = key_token->value;
nkey = key_token->length;
if (nkey > KEY_MAX_LENGTH) {
fail_length = true;
goto stop;
}
it = limited_get(key, nkey, c, exptime, should_touch, DO_UPDATE, &overflow);
if (settings.detail_enabled) {
stats_prefix_record_get(key, nkey, NULL != it);
}
if (it) {
/*
* Construct the response. Each hit adds three elements to the
* outgoing data list:
* "VALUE "
* key
* " " + flags + " " + data length + "\r\n" + data (with \r\n)
*/
{
MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
int nbytes = it->nbytes;;
nbytes = it->nbytes;
char *p = resp->wbuf;
memcpy(p, "VALUE ", 6);
p += 6;
memcpy(p, ITEM_key(it), it->nkey);
p += it->nkey;
p += make_ascii_get_suffix(p, it, return_cas, nbytes);
resp_add_iov(resp, resp->wbuf, p - resp->wbuf);
#ifdef EXTSTORE
if (it->it_flags & ITEM_HDR) {
if (_get_extstore(c, it, resp) != 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_oom_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
item_remove(it);
goto stop;
}
} else if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#else
if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#endif
}
if (settings.verbose > 1) {
int ii;
fprintf(stderr, ">%d sending key ", c->sfd);
for (ii = 0; ii < it->nkey; ++ii) {
fprintf(stderr, "%c", key[ii]);
}
fprintf(stderr, "\n");
}
/* item_get() has incremented it->refcount for us */
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
} else {
c->thread->stats.lru_hits[it->slabs_clsid]++;
c->thread->stats.get_cmds++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
#ifdef EXTSTORE
/* If ITEM_HDR, an io_wrap owns the reference. */
if ((it->it_flags & ITEM_HDR) == 0) {
resp->item = it;
}
#else
resp->item = it;
#endif
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
} else {
c->thread->stats.get_misses++;
c->thread->stats.get_cmds++;
}
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
pthread_mutex_unlock(&c->thread->stats.mutex);
}
key_token++;
if (key_token->length != 0) {
if (!resp_start(c)) {
goto stop;
}
resp = c->resp;
}
}
/*
* If the command string hasn't been fully processed, get the next set
* of tokens.
*/
if (key_token->value != NULL) {
ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS);
key_token = tokens;
if (!resp_start(c)) {
goto stop;
}
resp = c->resp;
}
} while(key_token->value != NULL);
stop:
if (settings.verbose > 1)
fprintf(stderr, ">%d END\n", c->sfd);
/*
If the loop was terminated because of out-of-memory, it is not
reliable to add END\r\n to the buffer, because it might not end
in \r\n. So we send SERVER_ERROR instead.
*/
if (key_token->value != NULL) {
// Kill any stacked responses we had.
conn_release_items(c);
// Start a new response object for the error message.
if (!resp_start(c)) {
// severe out of memory error.
conn_set_state(c, conn_closing);
return;
}
if (fail_length) {
out_string(c, "CLIENT_ERROR bad command line format");
} else {
out_of_memory(c, "SERVER_ERROR out of memory writing get response");
}
} else {
// Tag the end token onto the most recent response object.
resp_add_iov(resp, "END\r\n", 5);
conn_set_state(c, conn_mwrite);
}
}
// slow snprintf for debugging purposes.
static void process_meta_command(conn *c, token_t *tokens, const size_t ntokens) {
assert(c != NULL);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
char *key = tokens[KEY_TOKEN].value;
size_t nkey = tokens[KEY_TOKEN].length;
bool overflow; // not used here.
item *it = limited_get(key, nkey, c, 0, false, DONT_UPDATE, &overflow);
if (it) {
mc_resp *resp = c->resp;
size_t total = 0;
size_t ret;
// similar to out_string().
memcpy(resp->wbuf, "ME ", 3);
total += 3;
memcpy(resp->wbuf + total, ITEM_key(it), it->nkey);
total += it->nkey;
resp->wbuf[total] = ' ';
total++;
ret = snprintf(resp->wbuf + total, WRITE_BUFFER_SIZE - (it->nkey + 12),
"exp=%d la=%llu cas=%llu fetch=%s cls=%u size=%lu\r\n",
(it->exptime == 0) ? -1 : (current_time - it->exptime),
(unsigned long long)(current_time - it->time),
(unsigned long long)ITEM_get_cas(it),
(it->it_flags & ITEM_FETCHED) ? "yes" : "no",
ITEM_clsid(it),
(unsigned long) ITEM_ntotal(it));
item_remove(it);
resp->wbytes = total + ret;
resp_add_iov(resp, resp->wbuf, resp->wbytes);
conn_set_state(c, conn_new_cmd);
} else {
out_string(c, "EN");
}
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.meta_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
#define MFLAG_MAX_OPT_LENGTH 20
#define MFLAG_MAX_OPAQUE_LENGTH 32
struct _meta_flags {
unsigned int has_error :1; // flipped if we found an error during parsing.
unsigned int no_update :1;
unsigned int locked :1;
unsigned int vivify :1;
unsigned int la :1;
unsigned int hit :1;
unsigned int value :1;
unsigned int set_stale :1;
unsigned int no_reply :1;
unsigned int has_cas :1;
unsigned int new_ttl :1;
char mode; // single character mode switch, common to ms/ma
rel_time_t exptime;
rel_time_t autoviv_exptime;
rel_time_t recache_time;
int32_t value_len;
uint32_t client_flags;
uint64_t req_cas_id;
uint64_t delta; // ma
uint64_t initial; // ma
};
static int _meta_flag_preparse(token_t *tokens, const size_t ntokens,
struct _meta_flags *of, char **errstr) {
unsigned int i;
int32_t tmp_int;
uint8_t seen[127] = {0};
// Start just past the key token. Look at first character of each token.
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
uint8_t o = (uint8_t)tokens[i].value[0];
// zero out repeat flags so we don't over-parse for return data.
if (o >= 127 || seen[o] != 0) {
*errstr = "CLIENT_ERROR duplicate flag";
return -1;
}
seen[o] = 1;
switch (o) {
/* Negative exptimes can underflow and end up immortal. realtime() will
immediately expire values that are greater than REALTIME_MAXDELTA, but less
than process_started, so lets aim for that. */
case 'N':
of->locked = 1;
of->vivify = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->autoviv_exptime = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
}
break;
case 'T':
of->locked = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->exptime = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
of->new_ttl = true;
}
break;
case 'R':
of->locked = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->recache_time = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
}
break;
case 'l':
of->la = 1;
of->locked = 1; // need locked to delay LRU bump
break;
case 'O':
break;
case 'k': // known but no special handling
case 's':
case 't':
case 'c':
case 'f':
break;
case 'v':
of->value = 1;
break;
case 'h':
of->locked = 1; // need locked to delay LRU bump
break;
case 'u':
of->no_update = 1;
break;
case 'q':
of->no_reply = 1;
break;
// mset-related.
case 'F':
if (!safe_strtoul(tokens[i].value+1, &of->client_flags)) {
of->has_error = true;
}
break;
case 'S':
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
of->has_error = true;
} else {
// Size is adjusted for underflow or overflow once the
// \r\n terminator is added.
if (tmp_int < 0 || tmp_int > (INT_MAX - 2)) {
*errstr = "CLIENT_ERROR invalid length";
of->has_error = true;
} else {
of->value_len = tmp_int + 2; // \r\n
}
}
break;
case 'C': // mset, mdelete, marithmetic
if (!safe_strtoull(tokens[i].value+1, &of->req_cas_id)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = true;
} else {
of->has_cas = true;
}
break;
case 'M': // mset and marithmetic mode switch
if (tokens[i].length != 2) {
*errstr = "CLIENT_ERROR incorrect length for M token";
of->has_error = 1;
} else {
of->mode = tokens[i].value[1];
}
break;
case 'J': // marithmetic initial value
if (!safe_strtoull(tokens[i].value+1, &of->initial)) {
*errstr = "CLIENT_ERROR invalid numeric initial value";
of->has_error = 1;
}
break;
case 'D': // marithmetic delta value
if (!safe_strtoull(tokens[i].value+1, &of->delta)) {
*errstr = "CLIENT_ERROR invalid numeric delta value";
of->has_error = 1;
}
break;
case 'I':
of->set_stale = 1;
break;
default: // unknown flag, bail.
*errstr = "CLIENT_ERROR invalid flag";
return -1;
}
}
return of->has_error ? -1 : 0;
}
#define META_SPACE(p) { \
*p = ' '; \
p++; \
}
#define META_CHAR(p, c) { \
*p = ' '; \
*(p+1) = c; \
p += 2; \
}
static void process_mget_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
item *it;
unsigned int i = 0;
struct _meta_flags of = {0}; // option bitflags.
uint32_t hv; // cached hash value for unlocking an item.
bool failed = false;
bool item_created = false;
bool won_token = false;
bool ttl_set = false;
char *errstr;
mc_resp *resp = c->resp;
char *p = resp->wbuf;
assert(c != NULL);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
// NOTE: final token has length == 0.
// KEY_TOKEN == 1. 0 is command.
if (ntokens == 3) {
// TODO: any way to fix this?
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
} else if (ntokens > MFLAG_MAX_OPT_LENGTH) {
// TODO: ensure the command tokenizer gives us at least this many
out_errstring(c, "CLIENT_ERROR options flags are too long");
return;
}
// scrubs duplicated options and sets flags for how to load the item.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
out_errstring(c, errstr);
return;
}
c->noreply = of.no_reply;
// TODO: need to indicate if the item was overflowed or not?
// I think we do, since an overflow shouldn't trigger an alloc/replace.
bool overflow = false;
if (!of.locked) {
it = limited_get(key, nkey, c, 0, false, !of.no_update, &overflow);
} else {
// If we had to lock the item, we're doing our own bump later.
it = limited_get_locked(key, nkey, c, DONT_UPDATE, &hv, &overflow);
}
// Since we're a new protocol, we can actually inform users that refcount
// overflow is happening by straight up throwing an error.
// We definitely don't want to re-autovivify by accident.
if (overflow) {
assert(it == NULL);
out_errstring(c, "SERVER_ERROR refcount overflow during fetch");
return;
}
if (it == NULL && of.vivify) {
// Fill in the exptime during parsing later.
it = item_alloc(key, nkey, 0, realtime(0), 2);
// We don't actually need any of do_store_item's logic:
// - already fetched and missed an existing item.
// - lock is still held.
// - not append/prepend/replace
// - not testing CAS
if (it != NULL) {
// I look forward to the day I get rid of this :)
memcpy(ITEM_data(it), "\r\n", 2);
// NOTE: This initializes the CAS value.
do_item_link(it, hv);
item_created = true;
}
}
// don't have to check result of add_iov() since the iov size defaults are
// enough.
if (it) {
if (of.value) {
memcpy(p, "VA ", 3);
p = itoa_u32(it->nbytes-2, p+3);
} else {
memcpy(p, "OK", 2);
p += 2;
}
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
case 'T':
ttl_set = true;
it->exptime = of.exptime;
break;
case 'N':
if (item_created) {
it->exptime = of.autoviv_exptime;
won_token = true;
}
break;
case 'R':
// If we haven't autovivified and supplied token is less
// than current TTL, mark a win.
if ((it->it_flags & ITEM_TOKEN_SENT) == 0
&& !item_created
&& it->exptime != 0
&& it->exptime < of.recache_time) {
won_token = true;
}
break;
case 's':
META_CHAR(p, 's');
p = itoa_u32(it->nbytes-2, p);
break;
case 't':
// TTL remaining as of this request.
// needs to be relative because server clocks may not be in sync.
META_CHAR(p, 't');
if (it->exptime == 0) {
*p = '-';
*(p+1) = '1';
p += 2;
} else {
p = itoa_u32(it->exptime - current_time, p);
}
break;
case 'c':
META_CHAR(p, 'c');
p = itoa_u64(ITEM_get_cas(it), p);
break;
case 'f':
META_CHAR(p, 'f');
if (FLAGS_SIZE(it) == 0) {
*p = '0';
p++;
} else {
p = itoa_u32(*((uint32_t *) ITEM_suffix(it)), p);
}
break;
case 'l':
META_CHAR(p, 'l');
p = itoa_u32(current_time - it->time, p);
break;
case 'h':
META_CHAR(p, 'h');
if (it->it_flags & ITEM_FETCHED) {
*p = '1';
} else {
*p = '0';
}
p++;
break;
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, ITEM_key(it), it->nkey);
p += it->nkey;
break;
}
}
// Has this item already sent a token?
// Important to do this here so we don't send W with Z.
// Isn't critical, but easier for client authors to understand.
if (it->it_flags & ITEM_TOKEN_SENT) {
META_CHAR(p, 'Z');
}
if (it->it_flags & ITEM_STALE) {
META_CHAR(p, 'X');
// FIXME: think hard about this. is this a default, or a flag?
if ((it->it_flags & ITEM_TOKEN_SENT) == 0) {
// If we're stale but no token already sent, now send one.
won_token = true;
}
}
if (won_token) {
// Mark a win into the flag buffer.
META_CHAR(p, 'W');
it->it_flags |= ITEM_TOKEN_SENT;
}
*p = '\r';
*(p+1) = '\n';
*(p+2) = '\0';
p += 2;
// finally, chain in the buffer.
resp_add_iov(resp, resp->wbuf, p - resp->wbuf);
if (of.value) {
#ifdef EXTSTORE
if (it->it_flags & ITEM_HDR) {
if (_get_extstore(c, it, resp) != 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_oom_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
failed = true;
}
} else if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#else
if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#endif
}
// need to hold the ref at least because of the key above.
#ifdef EXTSTORE
if (!failed) {
if ((it->it_flags & ITEM_HDR) != 0 && of.value) {
// Only have extstore clean if header and returning value.
resp->item = NULL;
} else {
resp->item = it;
}
} else {
// Failed to set up extstore fetch.
if (of.locked) {
do_item_remove(it);
} else {
item_remove(it);
}
}
#else
resp->item = it;
#endif
} else {
failed = true;
}
if (of.locked) {
// Delayed bump so we could get fetched/last access time pre-update.
if (!of.no_update && it != NULL) {
do_item_bump(c, it, hv);
}
item_unlock(hv);
}
// we count this command as a normal one if we've gotten this far.
// TODO: for autovivify case, miss never happens. Is this okay?
if (!failed) {
pthread_mutex_lock(&c->thread->stats.mutex);
if (ttl_set) {
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
} else {
c->thread->stats.lru_hits[it->slabs_clsid]++;
c->thread->stats.get_cmds++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
conn_set_state(c, conn_new_cmd);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (ttl_set) {
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
} else {
c->thread->stats.get_misses++;
c->thread->stats.get_cmds++;
}
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
pthread_mutex_unlock(&c->thread->stats.mutex);
// This gets elided in noreply mode.
out_string(c, "EN");
}
return;
error:
if (it) {
do_item_remove(it);
if (of.locked) {
item_unlock(hv);
}
}
out_errstring(c, errstr);
}
static void process_mset_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
item *it;
int i;
short comm = NREAD_SET;
struct _meta_flags of = {0}; // option bitflags.
char *errstr = "CLIENT_ERROR bad command line format";
uint32_t hv;
mc_resp *resp = c->resp;
char *p = resp->wbuf;
assert(c != NULL);
// TODO: most of this is identical to mget.
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (ntokens == 3) {
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
}
if (ntokens > MFLAG_MAX_OPT_LENGTH) {
out_errstring(c, "CLIENT_ERROR options flags too long");
return;
}
// leave space for the status code.
p = resp->wbuf + 3;
// We need to at least try to get the size to properly slurp bad bytes
// after an error.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
goto error;
}
// Set noreply after tokens are understood.
c->noreply = of.no_reply;
bool has_error = false;
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
has_error = true;
break;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
// "mode switch" to alternative commands
switch (of.mode) {
case 0:
break; // no mode supplied.
case 'E': // Add...
comm = NREAD_ADD;
break;
case 'A': // Append.
comm = NREAD_APPEND;
break;
case 'P': // Prepend.
comm = NREAD_PREPEND;
break;
case 'R': // Replace.
comm = NREAD_REPLACE;
break;
case 'S': // Set. Default.
comm = NREAD_SET;
break;
default:
errstr = "CLIENT_ERROR invalid mode for ms M token";
goto error;
}
// The item storage function doesn't exactly map to mset.
// If a CAS value is supplied, upgrade default SET mode to CAS mode.
// Also allows REPLACE to work, as REPLACE + CAS works the same as CAS.
// add-with-cas works the same as add; but could only LRU bump if match..
// APPEND/PREPEND allow a simplified CAS check.
if (of.has_cas && (comm == NREAD_SET || comm == NREAD_REPLACE)) {
comm = NREAD_CAS;
}
// We attempt to process as much as we can in hopes of getting a valid and
// adjusted vlen, or else the data swallowed after error will be for 0b.
if (has_error)
goto error;
it = item_alloc(key, nkey, of.client_flags, of.exptime, of.value_len);
if (it == 0) {
enum store_item_type status;
// TODO: These could be normalized codes (TL and OM). Need to
// reorganize the output stuff a bit though.
if (! item_size_ok(nkey, of.client_flags, of.value_len)) {
errstr = "SERVER_ERROR object too large for cache";
status = TOO_LARGE;
} else {
errstr = "SERVER_ERROR out of memory storing object";
status = NO_MEMORY;
}
// FIXME: LOGGER_LOG specific to mset, include options.
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
NULL, status, comm, key, nkey, 0, 0);
/* Avoid stale data persisting in cache because we failed alloc. */
// NOTE: only if SET mode?
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
do_item_remove(it);
}
item_unlock(hv);
goto error;
}
ITEM_set_cas(it, of.req_cas_id);
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = it->nbytes;
c->cmd = comm;
if (of.set_stale && comm == NREAD_CAS) {
c->set_stale = true;
}
resp->wbytes = p - resp->wbuf;
memcpy(resp->wbuf + resp->wbytes, "\r\n", 2);
resp->wbytes += 2;
// We've written the status line into wbuf, use wbytes to finalize later.
resp_add_iov(resp, resp->wbuf, resp->wbytes);
c->mset_res = true;
conn_set_state(c, conn_nread);
return;
error:
/* swallow the data line */
c->sbytes = of.value_len;
// Note: no errors possible after the item was successfully allocated.
// So we're just looking at dumping error codes and returning.
out_errstring(c, errstr);
// TODO: pass state in? else switching twice meh.
conn_set_state(c, conn_swallow);
}
static void process_mdelete_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
uint64_t req_cas_id = 0;
item *it = NULL;
int i;
uint32_t hv;
struct _meta_flags of = {0}; // option bitflags.
char *errstr = "CLIENT_ERROR bad command line format";
mc_resp *resp = c->resp;
// reserve 3 bytes for status code
char *p = resp->wbuf + 3;
assert(c != NULL);
// TODO: most of this is identical to mget.
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (ntokens > MFLAG_MAX_OPT_LENGTH) {
out_string(c, "CLIENT_ERROR options flags too long");
return;
}
// scrubs duplicated options and sets flags for how to load the item.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
out_errstring(c, "CLIENT_ERROR invalid or duplicate flag");
return;
}
c->noreply = of.no_reply;
assert(c != NULL);
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
// allow only deleting/marking if a CAS value matches.
if (of.has_cas && ITEM_get_cas(it) != req_cas_id) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
memcpy(resp->wbuf, "EX ", 3);
goto cleanup;
}
// If we're to set this item as stale, we don't actually want to
// delete it. We mark the stale bit, bump CAS, and update exptime if
// we were supplied a new TTL.
if (of.set_stale) {
if (of.new_ttl) {
it->exptime = of.exptime;
}
it->it_flags |= ITEM_STALE;
// Also need to remove TOKEN_SENT, so next client can win.
it->it_flags &= ~ITEM_TOKEN_SENT;
ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
// Clients can noreply nominal responses.
if (c->noreply)
resp->skip = true;
memcpy(resp->wbuf, "OK ", 3);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].delete_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
if (c->noreply)
resp->skip = true;
memcpy(resp->wbuf, "OK ", 3);
}
goto cleanup;
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
memcpy(resp->wbuf, "NF ", 3);
goto cleanup;
}
cleanup:
if (it) {
do_item_remove(it);
}
// Item is always returned locked, even if missing.
item_unlock(hv);
resp->wbytes = p - resp->wbuf;
memcpy(resp->wbuf + resp->wbytes, "\r\n", 2);
resp->wbytes += 2;
resp_add_iov(resp, resp->wbuf, resp->wbytes);
conn_set_state(c, conn_new_cmd);
return;
error:
out_errstring(c, errstr);
}
static void process_marithmetic_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
int i;
struct _meta_flags of = {0}; // option bitflags.
char *errstr = "CLIENT_ERROR bad command line format";
mc_resp *resp = c->resp;
// no reservation (like del/set) since we post-process the status line.
char *p = resp->wbuf;
// If no argument supplied, incr or decr by one.
of.delta = 1;
of.initial = 0; // redundant, for clarity.
bool incr = true; // default mode is to increment.
bool locked = false;
uint32_t hv = 0;
item *it = NULL; // item returned by do_add_delta.
assert(c != NULL);
// TODO: most of this is identical to mget.
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (ntokens > MFLAG_MAX_OPT_LENGTH) {
out_string(c, "CLIENT_ERROR options flags too long");
return;
}
// scrubs duplicated options and sets flags for how to load the item.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
out_errstring(c, "CLIENT_ERROR invalid or duplicate flag");
return;
}
c->noreply = of.no_reply;
assert(c != NULL);
// "mode switch" to alternative commands
switch (of.mode) {
case 0: // no switch supplied.
break;
case 'I': // Incr (default)
case '+':
incr = true;
break;
case 'D': // Decr.
case '-':
incr = false;
break;
default:
errstr = "CLIENT_ERROR invalid mode for ma M token";
goto error;
break;
}
// take hash value and manually lock item... hold lock during store phase
// on miss and avoid recalculating the hash multiple times.
hv = hash(key, nkey);
item_lock(hv);
locked = true;
char tmpbuf[INCR_MAX_STORAGE_LEN];
// return a referenced item if it exists, so we can modify it here, rather
// than adding even more parameters to do_add_delta.
bool item_created = false;
switch(do_add_delta(c, key, nkey, incr, of.delta, tmpbuf, &of.req_cas_id, hv, &it)) {
case OK:
if (c->noreply)
resp->skip = true;
memcpy(resp->wbuf, "OK ", 3);
break;
case NON_NUMERIC:
errstr = "CLIENT_ERROR cannot increment or decrement non-numeric value";
goto error;
break;
case EOM:
errstr = "SERVER_ERROR out of memory";
goto error;
break;
case DELTA_ITEM_NOT_FOUND:
if (of.vivify) {
itoa_u64(of.initial, tmpbuf);
int vlen = strlen(tmpbuf);
it = item_alloc(key, nkey, 0, 0, vlen+2);
if (it != NULL) {
memcpy(ITEM_data(it), tmpbuf, vlen);
memcpy(ITEM_data(it) + vlen, "\r\n", 2);
if (do_store_item(it, NREAD_ADD, c, hv)) {
item_created = true;
} else {
// Not sure how we can get here if we're holding the lock.
memcpy(resp->wbuf, "NS ", 3);
}
} else {
errstr = "SERVER_ERROR Out of memory allocating new item";
goto error;
}
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (incr) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
// won't have a valid it here.
memcpy(p, "NF ", 3);
p += 3;
}
break;
case DELTA_ITEM_CAS_MISMATCH:
// also returns without a valid it.
memcpy(p, "EX ", 3);
p += 3;
break;
}
// final loop
// allows building the response with information after vivifying from a
// miss, or returning a new CAS value after add_delta().
if (it) {
size_t vlen = strlen(tmpbuf);
if (of.value) {
memcpy(p, "VA ", 3);
p = itoa_u32(vlen, p+3);
} else {
memcpy(p, "OK", 2);
p += 2;
}
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
case 'c':
META_CHAR(p, 'c');
p = itoa_u64(ITEM_get_cas(it), p);
break;
case 't':
META_CHAR(p, 't');
if (it->exptime == 0) {
*p = '-';
*(p+1) = '1';
p += 2;
} else {
p = itoa_u32(it->exptime - current_time, p);
}
break;
case 'T':
it->exptime = of.exptime;
break;
case 'N':
if (item_created) {
it->exptime = of.autoviv_exptime;
}
break;
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
if (of.value) {
*p = '\r';
*(p+1) = '\n';
p += 2;
memcpy(p, tmpbuf, vlen);
p += vlen;
}
do_item_remove(it);
} else {
// No item to handle. still need to return opaque/key tokens
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
}
item_unlock(hv);
resp->wbytes = p - resp->wbuf;
memcpy(resp->wbuf + resp->wbytes, "\r\n", 2);
resp->wbytes += 2;
resp_add_iov(resp, resp->wbuf, resp->wbytes);
conn_set_state(c, conn_new_cmd);
return;
error:
if (it != NULL)
do_item_remove(it);
if (locked)
item_unlock(hv);
out_errstring(c, errstr);
}
static void process_update_command(conn *c, token_t *tokens, const size_t ntokens, int comm, bool handle_cas) {
char *key;
size_t nkey;
unsigned int flags;
int32_t exptime_int = 0;
rel_time_t exptime = 0;
int vlen;
uint64_t req_cas_id=0;
item *it;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (! (safe_strtoul(tokens[2].value, (uint32_t *)&flags)
&& safe_strtol(tokens[3].value, &exptime_int)
&& safe_strtol(tokens[4].value, (int32_t *)&vlen))) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
exptime = realtime(EXPTIME_TO_POSITIVE_TIME(exptime_int));
// does cas value exist?
if (handle_cas) {
if (!safe_strtoull(tokens[5].value, &req_cas_id)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
}
if (vlen < 0 || vlen > (INT_MAX - 2)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
vlen += 2;
if (settings.detail_enabled) {
stats_prefix_record_set(key, nkey);
}
it = item_alloc(key, nkey, flags, exptime, vlen);
if (it == 0) {
enum store_item_type status;
if (! item_size_ok(nkey, flags, vlen)) {
out_string(c, "SERVER_ERROR object too large for cache");
status = TOO_LARGE;
} else {
out_of_memory(c, "SERVER_ERROR out of memory storing object");
status = NO_MEMORY;
}
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
NULL, status, comm, key, nkey, 0, 0, c->sfd);
/* swallow the data line */
conn_set_state(c, conn_swallow);
c->sbytes = vlen;
/* Avoid stale data persisting in cache because we failed alloc.
* Unacceptable for SET. Anywhere else too? */
if (comm == NREAD_SET) {
it = item_get(key, nkey, c, DONT_UPDATE);
if (it) {
item_unlink(it);
STORAGE_delete(c->thread->storage, it);
item_remove(it);
}
}
return;
}
ITEM_set_cas(it, req_cas_id);
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = it->nbytes;
c->cmd = comm;
conn_set_state(c, conn_nread);
}
static void process_touch_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
int32_t exptime_int = 0;
rel_time_t exptime = 0;
item *it;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (!safe_strtol(tokens[2].value, &exptime_int)) {
out_string(c, "CLIENT_ERROR invalid exptime argument");
return;
}
exptime = realtime(EXPTIME_TO_POSITIVE_TIME(exptime_int));
it = item_touch(key, nkey, exptime, c);
if (it) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "TOUCHED");
item_remove(it);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "NOT_FOUND");
}
}
static void process_arithmetic_command(conn *c, token_t *tokens, const size_t ntokens, const bool incr) {
char temp[INCR_MAX_STORAGE_LEN];
uint64_t delta;
char *key;
size_t nkey;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (!safe_strtoull(tokens[2].value, &delta)) {
out_string(c, "CLIENT_ERROR invalid numeric delta argument");
return;
}
switch(add_delta(c, key, nkey, incr, delta, temp, NULL)) {
case OK:
out_string(c, temp);
break;
case NON_NUMERIC:
out_string(c, "CLIENT_ERROR cannot increment or decrement non-numeric value");
break;
case EOM:
out_of_memory(c, "SERVER_ERROR out of memory");
break;
case DELTA_ITEM_NOT_FOUND:
pthread_mutex_lock(&c->thread->stats.mutex);
if (incr) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "NOT_FOUND");
break;
case DELTA_ITEM_CAS_MISMATCH:
break; /* Should never get here */
}
}
/*
* adds a delta value to a numeric item.
*
* c connection requesting the operation
* it item to adjust
* incr true to increment value, false to decrement
* delta amount to adjust value by
* buf buffer for response string
*
* returns a response string to send back to the client.
*/
enum delta_result_type do_add_delta(conn *c, const char *key, const size_t nkey,
const bool incr, const int64_t delta,
char *buf, uint64_t *cas,
const uint32_t hv,
item **it_ret) {
char *ptr;
uint64_t value;
int res;
item *it;
it = do_item_get(key, nkey, hv, c, DONT_UPDATE);
if (!it) {
return DELTA_ITEM_NOT_FOUND;
}
/* Can't delta zero byte values. 2-byte are the "\r\n" */
/* Also can't delta for chunked items. Too large to be a number */
#ifdef EXTSTORE
if (it->nbytes <= 2 || (it->it_flags & (ITEM_CHUNKED|ITEM_HDR)) != 0) {
#else
if (it->nbytes <= 2 || (it->it_flags & (ITEM_CHUNKED)) != 0) {
#endif
do_item_remove(it);
return NON_NUMERIC;
}
if (cas != NULL && *cas != 0 && ITEM_get_cas(it) != *cas) {
do_item_remove(it);
return DELTA_ITEM_CAS_MISMATCH;
}
ptr = ITEM_data(it);
if (!safe_strtoull(ptr, &value)) {
do_item_remove(it);
return NON_NUMERIC;
}
if (incr) {
value += delta;
MEMCACHED_COMMAND_INCR(c->sfd, ITEM_key(it), it->nkey, value);
} else {
if(delta > value) {
value = 0;
} else {
value -= delta;
}
MEMCACHED_COMMAND_DECR(c->sfd, ITEM_key(it), it->nkey, value);
}
pthread_mutex_lock(&c->thread->stats.mutex);
if (incr) {
c->thread->stats.slab_stats[ITEM_clsid(it)].incr_hits++;
} else {
c->thread->stats.slab_stats[ITEM_clsid(it)].decr_hits++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
itoa_u64(value, buf);
res = strlen(buf);
/* refcount == 2 means we are the only ones holding the item, and it is
* linked. We hold the item's lock in this function, so refcount cannot
* increase. */
if (res + 2 <= it->nbytes && it->refcount == 2) { /* replace in-place */
/* When changing the value without replacing the item, we
need to update the CAS on the existing item. */
/* We also need to fiddle it in the sizes tracker in case the tracking
* was enabled at runtime, since it relies on the CAS value to know
* whether to remove an item or not. */
item_stats_sizes_remove(it);
ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
item_stats_sizes_add(it);
memcpy(ITEM_data(it), buf, res);
memset(ITEM_data(it) + res, ' ', it->nbytes - res - 2);
do_item_update(it);
} else if (it->refcount > 1) {
item *new_it;
uint32_t flags;
FLAGS_CONV(it, flags);
new_it = do_item_alloc(ITEM_key(it), it->nkey, flags, it->exptime, res + 2);
if (new_it == 0) {
do_item_remove(it);
return EOM;
}
memcpy(ITEM_data(new_it), buf, res);
memcpy(ITEM_data(new_it) + res, "\r\n", 2);
item_replace(it, new_it, hv);
// Overwrite the older item's CAS with our new CAS since we're
// returning the CAS of the old item below.
ITEM_set_cas(it, (settings.use_cas) ? ITEM_get_cas(new_it) : 0);
do_item_remove(new_it); /* release our reference */
} else {
/* Should never get here. This means we somehow fetched an unlinked
* item. TODO: Add a counter? */
if (settings.verbose) {
fprintf(stderr, "Tried to do incr/decr on invalid item\n");
}
if (it->refcount == 1)
do_item_remove(it);
return DELTA_ITEM_NOT_FOUND;
}
if (cas) {
*cas = ITEM_get_cas(it); /* swap the incoming CAS value */
}
if (it_ret != NULL) {
*it_ret = it;
} else {
do_item_remove(it); /* release our reference */
}
return OK;
}
static void process_delete_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
item *it;
uint32_t hv;
assert(c != NULL);
if (ntokens > 3) {
bool hold_is_zero = strcmp(tokens[KEY_TOKEN+1].value, "0") == 0;
bool sets_noreply = set_noreply_maybe(c, tokens, ntokens);
bool valid = (ntokens == 4 && (hold_is_zero || sets_noreply))
|| (ntokens == 5 && hold_is_zero && sets_noreply);
if (!valid) {
out_string(c, "CLIENT_ERROR bad command line format. "
"Usage: delete <key> [noreply]");
return;
}
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if(nkey > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
if (settings.detail_enabled) {
stats_prefix_record_delete(key, nkey);
}
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].delete_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
do_item_remove(it); /* release our reference */
out_string(c, "DELETED");
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "NOT_FOUND");
}
item_unlock(hv);
}
static void process_verbosity_command(conn *c, token_t *tokens, const size_t ntokens) {
unsigned int level;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
level = strtoul(tokens[1].value, NULL, 10);
settings.verbose = level > MAX_VERBOSITY_LEVEL ? MAX_VERBOSITY_LEVEL : level;
out_string(c, "OK");
return;
}
#ifdef MEMCACHED_DEBUG
static void process_misbehave_command(conn *c) {
int allowed = 0;
// try opening new TCP socket
int i = socket(AF_INET, SOCK_STREAM, 0);
if (i != -1) {
allowed++;
close(i);
}
// try executing new commands
i = system("sleep 0");
if (i != -1) {
allowed++;
}
if (allowed) {
out_string(c, "ERROR");
} else {
out_string(c, "OK");
}
}
#endif
static void process_slabs_automove_command(conn *c, token_t *tokens, const size_t ntokens) {
unsigned int level;
double ratio;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (strcmp(tokens[2].value, "ratio") == 0) {
if (ntokens < 5 || !safe_strtod(tokens[3].value, &ratio)) {
out_string(c, "ERROR");
return;
}
settings.slab_automove_ratio = ratio;
} else {
level = strtoul(tokens[2].value, NULL, 10);
if (level == 0) {
settings.slab_automove = 0;
} else if (level == 1 || level == 2) {
settings.slab_automove = level;
} else {
out_string(c, "ERROR");
return;
}
}
out_string(c, "OK");
return;
}
/* TODO: decide on syntax for sampling? */
static void process_watch_command(conn *c, token_t *tokens, const size_t ntokens) {
uint16_t f = 0;
int x;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (!settings.watch_enabled) {
out_string(c, "CLIENT_ERROR watch commands not allowed");
return;
}
if (ntokens > 2) {
for (x = COMMAND_TOKEN + 1; x < ntokens - 1; x++) {
if ((strcmp(tokens[x].value, "rawcmds") == 0)) {
f |= LOG_RAWCMDS;
} else if ((strcmp(tokens[x].value, "evictions") == 0)) {
f |= LOG_EVICTIONS;
} else if ((strcmp(tokens[x].value, "fetchers") == 0)) {
f |= LOG_FETCHERS;
} else if ((strcmp(tokens[x].value, "mutations") == 0)) {
f |= LOG_MUTATIONS;
} else if ((strcmp(tokens[x].value, "sysevents") == 0)) {
f |= LOG_SYSEVENTS;
} else {
out_string(c, "ERROR");
return;
}
}
} else {
f |= LOG_FETCHERS;
}
switch(logger_add_watcher(c, c->sfd, f)) {
case LOGGER_ADD_WATCHER_TOO_MANY:
out_string(c, "WATCHER_TOO_MANY log watcher limit reached");
break;
case LOGGER_ADD_WATCHER_FAILED:
out_string(c, "WATCHER_FAILED failed to add log watcher");
break;
case LOGGER_ADD_WATCHER_OK:
conn_set_state(c, conn_watch);
event_del(&c->event);
break;
}
}
static void process_memlimit_command(conn *c, token_t *tokens, const size_t ntokens) {
uint32_t memlimit;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (!safe_strtoul(tokens[1].value, &memlimit)) {
out_string(c, "ERROR");
} else {
if (memlimit < 8) {
out_string(c, "MEMLIMIT_TOO_SMALL cannot set maxbytes to less than 8m");
} else {
if (memlimit > 1000000000) {
out_string(c, "MEMLIMIT_ADJUST_FAILED input value is megabytes not bytes");
} else if (slabs_adjust_mem_limit((size_t) memlimit * 1024 * 1024)) {
if (settings.verbose > 0) {
fprintf(stderr, "maxbytes adjusted to %llum\n", (unsigned long long)memlimit);
}
out_string(c, "OK");
} else {
out_string(c, "MEMLIMIT_ADJUST_FAILED out of bounds or unable to adjust");
}
}
}
}
static void process_lru_command(conn *c, token_t *tokens, const size_t ntokens) {
uint32_t pct_hot;
uint32_t pct_warm;
double hot_factor;
int32_t ttl;
double factor;
set_noreply_maybe(c, tokens, ntokens);
if (strcmp(tokens[1].value, "tune") == 0 && ntokens >= 7) {
if (!safe_strtoul(tokens[2].value, &pct_hot) ||
!safe_strtoul(tokens[3].value, &pct_warm) ||
!safe_strtod(tokens[4].value, &hot_factor) ||
!safe_strtod(tokens[5].value, &factor)) {
out_string(c, "ERROR");
} else {
if (pct_hot + pct_warm > 80) {
out_string(c, "ERROR hot and warm pcts must not exceed 80");
} else if (factor <= 0 || hot_factor <= 0) {
out_string(c, "ERROR hot/warm age factors must be greater than 0");
} else {
settings.hot_lru_pct = pct_hot;
settings.warm_lru_pct = pct_warm;
settings.hot_max_factor = hot_factor;
settings.warm_max_factor = factor;
out_string(c, "OK");
}
}
} else if (strcmp(tokens[1].value, "mode") == 0 && ntokens >= 4 &&
settings.lru_maintainer_thread) {
if (strcmp(tokens[2].value, "flat") == 0) {
settings.lru_segmented = false;
out_string(c, "OK");
} else if (strcmp(tokens[2].value, "segmented") == 0) {
settings.lru_segmented = true;
out_string(c, "OK");
} else {
out_string(c, "ERROR");
}
} else if (strcmp(tokens[1].value, "temp_ttl") == 0 && ntokens >= 4 &&
settings.lru_maintainer_thread) {
if (!safe_strtol(tokens[2].value, &ttl)) {
out_string(c, "ERROR");
} else {
if (ttl < 0) {
settings.temp_lru = false;
} else {
settings.temp_lru = true;
settings.temporary_ttl = ttl;
}
out_string(c, "OK");
}
} else {
out_string(c, "ERROR");
}
}
#ifdef EXTSTORE
static void process_extstore_command(conn *c, token_t *tokens, const size_t ntokens) {
set_noreply_maybe(c, tokens, ntokens);
bool ok = true;
if (ntokens < 4) {
ok = false;
} else if (strcmp(tokens[1].value, "free_memchunks") == 0 && ntokens > 4) {
/* per-slab-class free chunk setting. */
unsigned int clsid = 0;
unsigned int limit = 0;
if (!safe_strtoul(tokens[2].value, &clsid) ||
!safe_strtoul(tokens[3].value, &limit)) {
ok = false;
} else {
if (clsid < MAX_NUMBER_OF_SLAB_CLASSES) {
settings.ext_free_memchunks[clsid] = limit;
} else {
ok = false;
}
}
} else if (strcmp(tokens[1].value, "item_size") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_item_size))
ok = false;
} else if (strcmp(tokens[1].value, "item_age") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_item_age))
ok = false;
} else if (strcmp(tokens[1].value, "low_ttl") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_low_ttl))
ok = false;
} else if (strcmp(tokens[1].value, "recache_rate") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_recache_rate))
ok = false;
} else if (strcmp(tokens[1].value, "compact_under") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_compact_under))
ok = false;
} else if (strcmp(tokens[1].value, "drop_under") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_drop_under))
ok = false;
} else if (strcmp(tokens[1].value, "max_frag") == 0) {
if (!safe_strtod(tokens[2].value, &settings.ext_max_frag))
ok = false;
} else if (strcmp(tokens[1].value, "drop_unread") == 0) {
unsigned int v;
if (!safe_strtoul(tokens[2].value, &v)) {
ok = false;
} else {
settings.ext_drop_unread = v == 0 ? false : true;
}
} else {
ok = false;
}
if (!ok) {
out_string(c, "ERROR");
} else {
out_string(c, "OK");
}
}
#endif
// TODO: pipelined commands are incompatible with shifting connections to a
// side thread. Given this only happens in two instances (watch and
// lru_crawler metadump) it should be fine for things to bail. It _should_ be
// unusual for these commands.
// This is hard to fix since tokenize_command() mutilates the read buffer, so
// we can't drop out and back in again.
// Leaving this note here to spend more time on a fix when necessary, or if an
// opportunity becomes obvious.
static void process_command(conn *c, char *command) {
token_t tokens[MAX_TOKENS];
size_t ntokens;
int comm;
assert(c != NULL);
MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
if (settings.verbose > 1)
fprintf(stderr, "<%d %s\n", c->sfd, command);
/*
* for commands set/add/replace, we build an item and read the data
* directly into it, then continue in nread_complete().
*/
// Prep the response object for this query.
if (!resp_start(c)) {
conn_set_state(c, conn_closing);
return;
}
ntokens = tokenize_command(command, tokens, MAX_TOKENS);
if (ntokens >= 3 &&
((strcmp(tokens[COMMAND_TOKEN].value, "get") == 0) ||
(strcmp(tokens[COMMAND_TOKEN].value, "bget") == 0))) {
process_get_command(c, tokens, ntokens, false, false);
} else if ((ntokens == 6 || ntokens == 7) &&
((strcmp(tokens[COMMAND_TOKEN].value, "add") == 0 && (comm = NREAD_ADD)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "set") == 0 && (comm = NREAD_SET)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "replace") == 0 && (comm = NREAD_REPLACE)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "prepend") == 0 && (comm = NREAD_PREPEND)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "append") == 0 && (comm = NREAD_APPEND)) )) {
process_update_command(c, tokens, ntokens, comm, false);
} else if ((ntokens == 7 || ntokens == 8) && (strcmp(tokens[COMMAND_TOKEN].value, "cas") == 0 && (comm = NREAD_CAS))) {
process_update_command(c, tokens, ntokens, comm, true);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "incr") == 0)) {
process_arithmetic_command(c, tokens, ntokens, 1);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "gets") == 0)) {
process_get_command(c, tokens, ntokens, true, false);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "mg") == 0)) {
process_mget_command(c, tokens, ntokens);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "ms") == 0)) {
process_mset_command(c, tokens, ntokens);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "md") == 0)) {
process_mdelete_command(c, tokens, ntokens);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "mn") == 0)) {
out_string(c, "MN");
// mn command forces immediate writeback flush.
conn_set_state(c, conn_mwrite);
return;
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "ma") == 0)) {
process_marithmetic_command(c, tokens, ntokens);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "me") == 0)) {
process_meta_command(c, tokens, ntokens);
return;
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "decr") == 0)) {
process_arithmetic_command(c, tokens, ntokens, 0);
} else if (ntokens >= 3 && ntokens <= 5 && (strcmp(tokens[COMMAND_TOKEN].value, "delete") == 0)) {
process_delete_command(c, tokens, ntokens);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "touch") == 0)) {
process_touch_command(c, tokens, ntokens);
} else if (ntokens >= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "gat") == 0)) {
process_get_command(c, tokens, ntokens, false, true);
} else if (ntokens >= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "gats") == 0)) {
process_get_command(c, tokens, ntokens, true, true);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "stats") == 0)) {
process_stat(c, tokens, ntokens);
} else if (ntokens >= 2 && ntokens <= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "flush_all") == 0)) {
time_t exptime = 0;
rel_time_t new_oldest = 0;
set_noreply_maybe(c, tokens, ntokens);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.flush_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (!settings.flush_enabled) {
// flush_all is not allowed but we log it on stats
out_string(c, "CLIENT_ERROR flush_all not allowed");
return;
}
if (ntokens != (c->noreply ? 3 : 2)) {
exptime = strtol(tokens[1].value, NULL, 10);
if(errno == ERANGE) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
}
/*
If exptime is zero realtime() would return zero too, and
realtime(exptime) - 1 would overflow to the max unsigned
value. So we process exptime == 0 the same way we do when
no delay is given at all.
*/
if (exptime > 0) {
new_oldest = realtime(exptime);
} else { /* exptime == 0 */
new_oldest = current_time;
}
if (settings.use_cas) {
settings.oldest_live = new_oldest - 1;
if (settings.oldest_live <= current_time)
settings.oldest_cas = get_cas_id();
} else {
settings.oldest_live = new_oldest;
}
out_string(c, "OK");
return;
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "version") == 0)) {
out_string(c, "VERSION " VERSION);
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "quit") == 0)) {
conn_set_state(c, conn_mwrite);
c->close_after_write = true;
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "shutdown") == 0)) {
if (settings.shutdown_command) {
conn_set_state(c, conn_closing);
raise(SIGINT);
} else {
out_string(c, "ERROR: shutdown not enabled");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "slabs") == 0) {
if (ntokens == 5 && strcmp(tokens[COMMAND_TOKEN + 1].value, "reassign") == 0) {
int src, dst, rv;
if (settings.slab_reassign == false) {
out_string(c, "CLIENT_ERROR slab reassignment disabled");
return;
}
src = strtol(tokens[2].value, NULL, 10);
dst = strtol(tokens[3].value, NULL, 10);
if (errno == ERANGE) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
rv = slabs_reassign(src, dst);
switch (rv) {
case REASSIGN_OK:
out_string(c, "OK");
break;
case REASSIGN_RUNNING:
out_string(c, "BUSY currently processing reassign request");
break;
case REASSIGN_BADCLASS:
out_string(c, "BADCLASS invalid src or dst class id");
break;
case REASSIGN_NOSPARE:
out_string(c, "NOSPARE source class has no spare pages");
break;
case REASSIGN_SRC_DST_SAME:
out_string(c, "SAME src and dst class are identical");
break;
}
return;
} else if (ntokens >= 4 &&
(strcmp(tokens[COMMAND_TOKEN + 1].value, "automove") == 0)) {
process_slabs_automove_command(c, tokens, ntokens);
} else {
out_string(c, "ERROR");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "lru_crawler") == 0) {
if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "crawl") == 0) {
int rv;
if (settings.lru_crawler == false) {
out_string(c, "CLIENT_ERROR lru crawler disabled");
return;
}
rv = lru_crawler_crawl(tokens[2].value, CRAWLER_EXPIRED, NULL, 0,
settings.lru_crawler_tocrawl);
switch(rv) {
case CRAWLER_OK:
out_string(c, "OK");
break;
case CRAWLER_RUNNING:
out_string(c, "BUSY currently processing crawler request");
break;
case CRAWLER_BADCLASS:
out_string(c, "BADCLASS invalid class id");
break;
case CRAWLER_NOTSTARTED:
out_string(c, "NOTSTARTED no items to crawl");
break;
case CRAWLER_ERROR:
out_string(c, "ERROR an unknown error happened");
break;
}
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "metadump") == 0) {
if (settings.lru_crawler == false) {
out_string(c, "CLIENT_ERROR lru crawler disabled");
return;
}
if (!settings.dump_enabled) {
out_string(c, "ERROR metadump not allowed");
return;
}
if (resp_has_stack(c)) {
out_string(c, "ERROR cannot pipeline other commands before metadump");
return;
}
int rv = lru_crawler_crawl(tokens[2].value, CRAWLER_METADUMP,
c, c->sfd, LRU_CRAWLER_CAP_REMAINING);
switch(rv) {
case CRAWLER_OK:
// TODO: documentation says this string is returned, but
// it never was before. We never switch to conn_write so
// this o_s call never worked. Need to talk to users and
// decide if removing the OK from docs is fine.
//out_string(c, "OK");
// TODO: Don't reuse conn_watch here.
conn_set_state(c, conn_watch);
event_del(&c->event);
break;
case CRAWLER_RUNNING:
out_string(c, "BUSY currently processing crawler request");
break;
case CRAWLER_BADCLASS:
out_string(c, "BADCLASS invalid class id");
break;
case CRAWLER_NOTSTARTED:
out_string(c, "NOTSTARTED no items to crawl");
break;
case CRAWLER_ERROR:
out_string(c, "ERROR an unknown error happened");
break;
}
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "tocrawl") == 0) {
uint32_t tocrawl;
if (!safe_strtoul(tokens[2].value, &tocrawl)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
settings.lru_crawler_tocrawl = tocrawl;
out_string(c, "OK");
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "sleep") == 0) {
uint32_t tosleep;
if (!safe_strtoul(tokens[2].value, &tosleep)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
if (tosleep > 1000000) {
out_string(c, "CLIENT_ERROR sleep must be one second or less");
return;
}
settings.lru_crawler_sleep = tosleep;
out_string(c, "OK");
return;
} else if (ntokens == 3) {
if ((strcmp(tokens[COMMAND_TOKEN + 1].value, "enable") == 0)) {
if (start_item_crawler_thread() == 0) {
out_string(c, "OK");
} else {
out_string(c, "ERROR failed to start lru crawler thread");
}
} else if ((strcmp(tokens[COMMAND_TOKEN + 1].value, "disable") == 0)) {
if (stop_item_crawler_thread(CRAWLER_NOWAIT) == 0) {
out_string(c, "OK");
} else {
out_string(c, "ERROR failed to stop lru crawler thread");
}
} else {
out_string(c, "ERROR");
}
return;
} else {
out_string(c, "ERROR");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "watch") == 0) {
if (resp_has_stack(c)) {
out_string(c, "ERROR cannot pipeline other commands before watch");
return;
}
process_watch_command(c, tokens, ntokens);
} else if ((ntokens == 3 || ntokens == 4) && (strcmp(tokens[COMMAND_TOKEN].value, "cache_memlimit") == 0)) {
process_memlimit_command(c, tokens, ntokens);
} else if ((ntokens == 3 || ntokens == 4) && (strcmp(tokens[COMMAND_TOKEN].value, "verbosity") == 0)) {
process_verbosity_command(c, tokens, ntokens);
} else if (ntokens >= 3 && strcmp(tokens[COMMAND_TOKEN].value, "lru") == 0) {
process_lru_command(c, tokens, ntokens);
#ifdef MEMCACHED_DEBUG
// commands which exist only for testing the memcached's security protection
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "misbehave") == 0)) {
process_misbehave_command(c);
#endif
#ifdef EXTSTORE
} else if (ntokens >= 3 && strcmp(tokens[COMMAND_TOKEN].value, "extstore") == 0) {
process_extstore_command(c, tokens, ntokens);
#endif
#ifdef TLS
} else if (ntokens == 2 && strcmp(tokens[COMMAND_TOKEN].value, "refresh_certs") == 0) {
set_noreply_maybe(c, tokens, ntokens);
char *errmsg = NULL;
if (refresh_certs(&errmsg)) {
out_string(c, "OK");
} else {
write_and_free(c, errmsg, strlen(errmsg));
}
return;
#endif
} else {
if (ntokens >= 2 && strncmp(tokens[ntokens - 2].value, "HTTP/", 5) == 0) {
conn_set_state(c, conn_closing);
} else {
out_string(c, "ERROR");
}
}
return;
}
static int try_read_command_negotiate(conn *c) {
assert(c->protocol == negotiating_prot);
assert(c != NULL);
assert(c->rcurr <= (c->rbuf + c->rsize));
assert(c->rbytes > 0);
if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) {
c->protocol = binary_prot;
c->try_read_command = try_read_command_binary;
} else {
// authentication doesn't work with negotiated protocol.
c->protocol = ascii_prot;
c->try_read_command = try_read_command_ascii;
}
if (settings.verbose > 1) {
fprintf(stderr, "%d: Client using the %s protocol\n", c->sfd,
prot_text(c->protocol));
}
return c->try_read_command(c);
}
static int try_read_command_udp(conn *c) {
assert(c != NULL);
assert(c->rcurr <= (c->rbuf + c->rsize));
assert(c->rbytes > 0);
if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) {
c->protocol = binary_prot;
return try_read_command_binary(c);
} else {
c->protocol = ascii_prot;
return try_read_command_ascii(c);
}
}
static int try_read_command_binary(conn *c) {
/* Do we have the complete packet header? */
if (c->rbytes < sizeof(c->binary_header)) {
/* need more data! */
return 0;
} else {
memcpy(&c->binary_header, c->rcurr, sizeof(c->binary_header));
protocol_binary_request_header* req;
req = &c->binary_header;
if (settings.verbose > 1) {
/* Dump the packet before we convert it to host order */
int ii;
fprintf(stderr, "<%d Read binary protocol data:", c->sfd);
for (ii = 0; ii < sizeof(req->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n<%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", req->bytes[ii]);
}
fprintf(stderr, "\n");
}
c->binary_header = *req;
c->binary_header.request.keylen = ntohs(req->request.keylen);
c->binary_header.request.bodylen = ntohl(req->request.bodylen);
c->binary_header.request.cas = ntohll(req->request.cas);
if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ) {
if (settings.verbose) {
fprintf(stderr, "Invalid magic: %x\n",
c->binary_header.request.magic);
}
conn_set_state(c, conn_closing);
return -1;
}
uint8_t extlen = c->binary_header.request.extlen;
uint16_t keylen = c->binary_header.request.keylen;
if (c->rbytes < keylen + extlen + sizeof(c->binary_header)) {
// Still need more bytes. Let try_read_network() realign the
// read-buffer and fetch more data as necessary.
return 0;
}
if (!resp_start(c)) {
conn_set_state(c, conn_closing);
return -1;
}
c->cmd = c->binary_header.request.opcode;
c->keylen = c->binary_header.request.keylen;
c->opaque = c->binary_header.request.opaque;
/* clear the returned cas value */
c->cas = 0;
c->last_cmd_time = current_time;
// sigh. binprot has no "largest possible extlen" define, and I don't
// want to refactor a ton of code either. Header is only ever used out
// of c->binary_header, but the extlen stuff is used for the latter
// bytes. Just wastes 24 bytes on the stack this way.
char extbuf[sizeof(c->binary_header) + BIN_MAX_EXTLEN+1];
memcpy(extbuf + sizeof(c->binary_header), c->rcurr + sizeof(c->binary_header),
extlen > BIN_MAX_EXTLEN ? BIN_MAX_EXTLEN : extlen);
c->rbytes -= sizeof(c->binary_header) + extlen + keylen;
c->rcurr += sizeof(c->binary_header) + extlen + keylen;
dispatch_bin_command(c, extbuf);
}
return 1;
}
static int try_read_command_asciiauth(conn *c) {
token_t tokens[MAX_TOKENS];
size_t ntokens;
char *cont = NULL;
if (!c->resp) {
if (!resp_start(c)) {
conn_set_state(c, conn_closing);
return 1;
}
}
// TODO: move to another function.
if (!c->sasl_started) {
char *el;
uint32_t size = 0;
// impossible for the auth command to be this short.
if (c->rbytes < 2)
return 0;
el = memchr(c->rcurr, '\n', c->rbytes);
// If no newline after 1k, getting junk data, close out.
if (!el) {
if (c->rbytes > 1024) {
conn_set_state(c, conn_closing);
return 1;
}
return 0;
}
// Looking for: "set foo 0 0 N\r\nuser pass\r\n"
// key, flags, and ttl are ignored. N is used to see if we have the rest.
// so tokenize doesn't walk past into the value.
// it's fine to leave the \r in, as strtoul will stop at it.
*el = '\0';
ntokens = tokenize_command(c->rcurr, tokens, MAX_TOKENS);
// ensure the buffer is consumed.
c->rbytes -= (el - c->rcurr) + 1;
c->rcurr += (el - c->rcurr) + 1;
// final token is a NULL ender, so we have one more than expected.
if (ntokens < 6
|| strcmp(tokens[0].value, "set") != 0
|| !safe_strtoul(tokens[4].value, &size)) {
out_string(c, "CLIENT_ERROR unauthenticated");
return 1;
}
// we don't actually care about the key at all; it can be anything.
// we do care about the size of the remaining read.
c->rlbytes = size + 2;
c->sasl_started = true; // reuse from binprot sasl, but not sasl :)
}
if (c->rbytes < c->rlbytes) {
// need more bytes.
return 0;
}
cont = c->rcurr;
// advance buffer. no matter what we're stopping.
c->rbytes -= c->rlbytes;
c->rcurr += c->rlbytes;
c->sasl_started = false;
// must end with \r\n
// NB: I thought ASCII sets also worked with just \n, but according to
// complete_nread_ascii only \r\n is valid.
if (strncmp(cont + c->rlbytes - 2, "\r\n", 2) != 0) {
out_string(c, "CLIENT_ERROR bad command line termination");
return 1;
}
// payload should be "user pass", so we can use the tokenizer.
cont[c->rlbytes - 2] = '\0';
ntokens = tokenize_command(cont, tokens, MAX_TOKENS);
if (ntokens < 3) {
out_string(c, "CLIENT_ERROR bad authentication token format");
return 1;
}
if (authfile_check(tokens[0].value, tokens[1].value) == 1) {
out_string(c, "STORED");
c->authenticated = true;
c->try_read_command = try_read_command_ascii;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
} else {
out_string(c, "CLIENT_ERROR authentication failure");
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
c->thread->stats.auth_errors++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
return 1;
}
static int try_read_command_ascii(conn *c) {
char *el, *cont;
if (c->rbytes == 0)
return 0;
el = memchr(c->rcurr, '\n', c->rbytes);
if (!el) {
if (c->rbytes > 1024) {
/*
* We didn't have a '\n' in the first k. This _has_ to be a
* large multiget, if not we should just nuke the connection.
*/
char *ptr = c->rcurr;
while (*ptr == ' ') { /* ignore leading whitespaces */
++ptr;
}
if (ptr - c->rcurr > 100 ||
(strncmp(ptr, "get ", 4) && strncmp(ptr, "gets ", 5))) {
conn_set_state(c, conn_closing);
return 1;
}
// ASCII multigets are unbound, so our fixed size rbuf may not
// work for this particular workload... For backcompat we'll use a
// malloc/realloc/free routine just for this.
if (!c->rbuf_malloced) {
if (!rbuf_switch_to_malloc(c)) {
conn_set_state(c, conn_closing);
return 1;
}
}
}
return 0;
}
cont = el + 1;
if ((el - c->rcurr) > 1 && *(el - 1) == '\r') {
el--;
}
*el = '\0';
assert(cont <= (c->rcurr + c->rbytes));
c->last_cmd_time = current_time;
process_command(c, c->rcurr);
c->rbytes -= (cont - c->rcurr);
c->rcurr = cont;
assert(c->rcurr <= (c->rbuf + c->rsize));
return 1;
}
/*
* read a UDP request.
*/
static enum try_read_result try_read_udp(conn *c) {
int res;
assert(c != NULL);
c->request_addr_size = sizeof(c->request_addr);
res = recvfrom(c->sfd, c->rbuf, c->rsize,
0, (struct sockaddr *)&c->request_addr,
&c->request_addr_size);
if (res > 8) {
unsigned char *buf = (unsigned char *)c->rbuf;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
/* Beginning of UDP packet is the request ID; save it. */
c->request_id = buf[0] * 256 + buf[1];
/* If this is a multi-packet request, drop it. */
if (buf[4] != 0 || buf[5] != 1) {
out_string(c, "SERVER_ERROR multi-packet request not supported");
return READ_NO_DATA_RECEIVED;
}
/* Don't care about any of the rest of the header. */
res -= 8;
memmove(c->rbuf, c->rbuf + 8, res);
c->rbytes = res;
c->rcurr = c->rbuf;
return READ_DATA_RECEIVED;
}
return READ_NO_DATA_RECEIVED;
}
/*
* read from network as much as we can, handle buffer overflow and connection
* close.
* before reading, move the remaining incomplete fragment of a command
* (if any) to the beginning of the buffer.
*
* To protect us from someone flooding a connection with bogus data causing
* the connection to eat up all available memory, break out and start looking
* at the data I've got after a number of reallocs...
*
* @return enum try_read_result
*/
static enum try_read_result try_read_network(conn *c) {
enum try_read_result gotdata = READ_NO_DATA_RECEIVED;
int res;
int num_allocs = 0;
assert(c != NULL);
if (c->rcurr != c->rbuf) {
if (c->rbytes != 0) /* otherwise there's nothing to copy */
memmove(c->rbuf, c->rcurr, c->rbytes);
c->rcurr = c->rbuf;
}
while (1) {
// TODO: move to rbuf_* func?
if (c->rbytes >= c->rsize && c->rbuf_malloced) {
if (num_allocs == 4) {
return gotdata;
}
++num_allocs;
char *new_rbuf = realloc(c->rbuf, c->rsize * 2);
if (!new_rbuf) {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
if (settings.verbose > 0) {
fprintf(stderr, "Couldn't realloc input buffer\n");
}
c->rbytes = 0; /* ignore what we read */
out_of_memory(c, "SERVER_ERROR out of memory reading request");
c->close_after_write = true;
return READ_MEMORY_ERROR;
}
c->rcurr = c->rbuf = new_rbuf;
c->rsize *= 2;
}
int avail = c->rsize - c->rbytes;
res = c->read(c, c->rbuf + c->rbytes, avail);
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
gotdata = READ_DATA_RECEIVED;
c->rbytes += res;
if (res == avail && c->rbuf_malloced) {
// Resize rbuf and try a few times if huge ascii multiget.
continue;
} else {
break;
}
}
if (res == 0) {
return READ_ERROR;
}
if (res == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
break;
}
return READ_ERROR;
}
}
return gotdata;
}
static bool update_event(conn *c, const int new_flags) {
assert(c != NULL);
struct event_base *base = c->event.ev_base;
if (c->ev_flags == new_flags)
return true;
if (event_del(&c->event) == -1) return false;
event_set(&c->event, c->sfd, new_flags, event_handler, (void *)c);
event_base_set(base, &c->event);
c->ev_flags = new_flags;
if (event_add(&c->event, 0) == -1) return false;
return true;
}
/*
* Sets whether we are listening for new connections or not.
*/
void do_accept_new_conns(const bool do_accept) {
conn *next;
for (next = listen_conn; next; next = next->next) {
if (do_accept) {
update_event(next, EV_READ | EV_PERSIST);
if (listen(next->sfd, settings.backlog) != 0) {
perror("listen");
}
}
else {
update_event(next, 0);
if (listen(next->sfd, 0) != 0) {
perror("listen");
}
}
}
if (do_accept) {
struct timeval maxconns_exited;
uint64_t elapsed_us;
gettimeofday(&maxconns_exited,NULL);
STATS_LOCK();
elapsed_us =
(maxconns_exited.tv_sec - stats.maxconns_entered.tv_sec) * 1000000
+ (maxconns_exited.tv_usec - stats.maxconns_entered.tv_usec);
stats.time_in_listen_disabled_us += elapsed_us;
stats_state.accepting_conns = true;
STATS_UNLOCK();
} else {
STATS_LOCK();
stats_state.accepting_conns = false;
gettimeofday(&stats.maxconns_entered,NULL);
stats.listen_disabled_num++;
STATS_UNLOCK();
allow_new_conns = false;
maxconns_handler(-42, 0, 0);
}
}
#define TRANSMIT_ONE_RESP true
#define TRANSMIT_ALL_RESP false
static int _transmit_pre(conn *c, struct iovec *iovs, int iovused, bool one_resp) {
mc_resp *resp = c->resp_head;
while (resp && iovused + resp->iovcnt < IOV_MAX-1) {
if (resp->skip) {
// Don't actually unchain the resp obj here since it's singly-linked.
// Just let the post function handle it linearly.
resp = resp->next;
continue;
}
if (resp->chunked_data_iov) {
// Handle chunked items specially.
// They spend much more time in send so we can be a bit wasteful
// in rebuilding iovecs for them.
item_chunk *ch = (item_chunk *)ITEM_schunk((item *)resp->iov[resp->chunked_data_iov].iov_base);
int x;
for (x = 0; x < resp->iovcnt; x++) {
// This iov is tracking how far we've copied so far.
if (x == resp->chunked_data_iov) {
int done = resp->chunked_total - resp->iov[x].iov_len;
// Start from the len to allow binprot to cut the \r\n
int todo = resp->iov[x].iov_len;
while (ch && todo > 0 && iovused < IOV_MAX-1) {
int skip = 0;
if (!ch->used) {
ch = ch->next;
continue;
}
// Skip parts we've already sent.
if (done >= ch->used) {
done -= ch->used;
ch = ch->next;
continue;
} else if (done) {
skip = done;
done = 0;
}
iovs[iovused].iov_base = ch->data + skip;
// Stupid binary protocol makes this go negative.
iovs[iovused].iov_len = ch->used - skip > todo ? todo : ch->used - skip;
iovused++;
todo -= ch->used - skip;
ch = ch->next;
}
} else {
iovs[iovused].iov_base = resp->iov[x].iov_base;
iovs[iovused].iov_len = resp->iov[x].iov_len;
iovused++;
}
if (iovused >= IOV_MAX-1)
break;
}
} else {
memcpy(&iovs[iovused], resp->iov, sizeof(struct iovec)*resp->iovcnt);
iovused += resp->iovcnt;
}
// done looking at first response, walk down the chain.
resp = resp->next;
// used for UDP mode: UDP cannot send multiple responses per packet.
if (one_resp)
break;
}
return iovused;
}
/*
* Decrements and completes responses based on how much data was transmitted.
* Takes the connection and current result bytes.
*/
static void _transmit_post(conn *c, ssize_t res) {
// We've written some of the data. Remove the completed
// responses from the list of pending writes.
mc_resp *resp = c->resp_head;
while (resp) {
int x;
if (resp->skip) {
resp = resp_finish(c, resp);
continue;
}
// fastpath check. all small responses should cut here.
if (res >= resp->tosend) {
res -= resp->tosend;
resp = resp_finish(c, resp);
continue;
}
// it's fine to re-check iov's that were zeroed out before.
for (x = 0; x < resp->iovcnt; x++) {
struct iovec *iov = &resp->iov[x];
if (res >= iov->iov_len) {
resp->tosend -= iov->iov_len;
res -= iov->iov_len;
iov->iov_len = 0;
} else {
// Dumb special case for chunked items. Currently tracking
// where to inject the chunked item via iov_base.
// Extra not-great since chunked items can't be the first
// index, so we have to check for non-zero c_d_iov first.
if (!resp->chunked_data_iov || x != resp->chunked_data_iov) {
iov->iov_base = (char *)iov->iov_base + res;
}
iov->iov_len -= res;
resp->tosend -= res;
res = 0;
break;
}
}
// are we done with this response object?
if (resp->tosend == 0) {
resp = resp_finish(c, resp);
} else {
// Jammed up here. This is the new head.
break;
}
}
}
/*
* Transmit the next chunk of data from our list of msgbuf structures.
*
* Returns:
* TRANSMIT_COMPLETE All done writing.
* TRANSMIT_INCOMPLETE More data remaining to write.
* TRANSMIT_SOFT_ERROR Can't write any more right now.
* TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
*/
static enum transmit_result transmit(conn *c) {
assert(c != NULL);
struct iovec iovs[IOV_MAX];
struct msghdr msg;
int iovused = 0;
// init the msg.
memset(&msg, 0, sizeof(struct msghdr));
msg.msg_iov = iovs;
iovused = _transmit_pre(c, iovs, iovused, TRANSMIT_ALL_RESP);
// Alright, send.
ssize_t res;
msg.msg_iovlen = iovused;
res = c->sendmsg(c, &msg, 0);
if (res >= 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_written += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
// Decrement any partial IOV's and complete any finished resp's.
_transmit_post(c, res);
if (c->resp_head) {
return TRANSMIT_INCOMPLETE;
} else {
return TRANSMIT_COMPLETE;
}
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_WRITE | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
return TRANSMIT_HARD_ERROR;
}
return TRANSMIT_SOFT_ERROR;
}
/* if res == -1 and error is not EAGAIN or EWOULDBLOCK,
we have a real error, on which we close the connection */
if (settings.verbose > 0)
perror("Failed to write, and not due to blocking");
conn_set_state(c, conn_closing);
return TRANSMIT_HARD_ERROR;
}
static void build_udp_header(unsigned char *hdr, mc_resp *resp) {
// We need to communicate the total number of packets
// If this isn't set, it's the first time this response is building a udp
// header, so "tosend" must be static.
if (!resp->udp_total) {
uint32_t total;
total = resp->tosend / UDP_MAX_PAYLOAD_SIZE;
if (resp->tosend % UDP_MAX_PAYLOAD_SIZE)
total++;
// The spec doesn't really say what we should do here. It's _probably_
// better to bail out?
if (total > USHRT_MAX) {
total = USHRT_MAX;
}
resp->udp_total = total;
}
// TODO: why wasn't this hto*'s and casts?
// this ends up sending UDP hdr data specifically in host byte order.
*hdr++ = resp->request_id / 256;
*hdr++ = resp->request_id % 256;
*hdr++ = resp->udp_sequence / 256;
*hdr++ = resp->udp_sequence % 256;
*hdr++ = resp->udp_total / 256;
*hdr++ = resp->udp_total % 256;
*hdr++ = 0;
*hdr++ = 0;
resp->udp_sequence++;
}
/*
* UDP specific transmit function. Uses its own function rather than check
* IS_UDP() five times. If we ever implement sendmmsg or similar support they
* will diverge even more.
* Does not use TLS.
*
* Returns:
* TRANSMIT_COMPLETE All done writing.
* TRANSMIT_INCOMPLETE More data remaining to write.
* TRANSMIT_SOFT_ERROR Can't write any more right now.
* TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
*/
static enum transmit_result transmit_udp(conn *c) {
assert(c != NULL);
struct iovec iovs[IOV_MAX];
struct msghdr msg;
mc_resp *resp;
int iovused = 0;
unsigned char udp_hdr[UDP_HEADER_SIZE];
// We only send one UDP packet per call (ugh), so we can only operate on a
// single response at a time.
resp = c->resp_head;
if (!resp) {
return TRANSMIT_COMPLETE;
}
if (resp->skip) {
resp = resp_finish(c, resp);
return TRANSMIT_INCOMPLETE;
}
// clear the message and initialize it.
memset(&msg, 0, sizeof(struct msghdr));
msg.msg_iov = iovs;
// the UDP source to return to.
msg.msg_name = &resp->request_addr;
msg.msg_namelen = resp->request_addr_size;
// First IOV is the custom UDP header.
iovs[0].iov_base = udp_hdr;
iovs[0].iov_len = UDP_HEADER_SIZE;
build_udp_header(udp_hdr, resp);
iovused++;
// Fill the IOV's the standard way.
// TODO: might get a small speedup if we let it break early with a length
// limit.
iovused = _transmit_pre(c, iovs, iovused, TRANSMIT_ONE_RESP);
// Clip the IOV's to the max UDP packet size.
// If we add support for send_mmsg, this can be where we split msg's.
{
int x = 0;
int len = 0;
for (x = 0; x < iovused; x++) {
if (len + iovs[x].iov_len >= UDP_MAX_PAYLOAD_SIZE) {
iovs[x].iov_len = UDP_MAX_PAYLOAD_SIZE - len;
x++;
break;
} else {
len += iovs[x].iov_len;
}
}
iovused = x;
}
ssize_t res;
msg.msg_iovlen = iovused;
// NOTE: uses system sendmsg since we have no support for indirect UDP.
res = sendmsg(c->sfd, &msg, 0);
if (res >= 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_written += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
// Ignore the header size from forwarding the IOV's
res -= UDP_HEADER_SIZE;
// Decrement any partial IOV's and complete any finished resp's.
_transmit_post(c, res);
if (c->resp_head) {
return TRANSMIT_INCOMPLETE;
} else {
return TRANSMIT_COMPLETE;
}
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_WRITE | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
return TRANSMIT_HARD_ERROR;
}
return TRANSMIT_SOFT_ERROR;
}
/* if res == -1 and error is not EAGAIN or EWOULDBLOCK,
we have a real error, on which we close the connection */
if (settings.verbose > 0)
perror("Failed to write, and not due to blocking");
conn_set_state(c, conn_read);
return TRANSMIT_HARD_ERROR;
}
/* Does a looped read to fill data chunks */
/* TODO: restrict number of times this can loop.
* Also, benchmark using readv's.
*/
static int read_into_chunked_item(conn *c) {
int total = 0;
int res;
assert(c->rcurr != c->ritem);
while (c->rlbytes > 0) {
item_chunk *ch = (item_chunk *)c->ritem;
if (ch->size == ch->used) {
// FIXME: ch->next is currently always 0. remove this?
if (ch->next) {
c->ritem = (char *) ch->next;
} else {
/* Allocate next chunk. Binary protocol needs 2b for \r\n */
c->ritem = (char *) do_item_alloc_chunk(ch, c->rlbytes +
((c->protocol == binary_prot) ? 2 : 0));
if (!c->ritem) {
// We failed an allocation. Let caller handle cleanup.
total = -2;
break;
}
// ritem has new chunk, restart the loop.
continue;
//assert(c->rlbytes == 0);
}
}
int unused = ch->size - ch->used;
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
total = 0;
int tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
tocopy = tocopy > unused ? unused : tocopy;
if (c->ritem != c->rcurr) {
memmove(ch->data + ch->used, c->rcurr, tocopy);
}
total += tocopy;
c->rlbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
ch->used += tocopy;
if (c->rlbytes == 0) {
break;
}
} else {
/* now try reading from the socket */
res = c->read(c, ch->data + ch->used,
(unused > c->rlbytes ? c->rlbytes : unused));
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
ch->used += res;
total += res;
c->rlbytes -= res;
} else {
/* Reset total to the latest result so caller can handle it */
total = res;
break;
}
}
}
/* At some point I will be able to ditch the \r\n from item storage and
remove all of these kludges.
The above binprot check ensures inline space for \r\n, but if we do
exactly enough allocs there will be no additional chunk for \r\n.
*/
if (c->rlbytes == 0 && c->protocol == binary_prot && total >= 0) {
item_chunk *ch = (item_chunk *)c->ritem;
if (ch->size - ch->used < 2) {
c->ritem = (char *) do_item_alloc_chunk(ch, 2);
if (!c->ritem) {
total = -2;
}
}
}
return total;
}
static void drive_machine(conn *c) {
bool stop = false;
int sfd;
socklen_t addrlen;
struct sockaddr_storage addr;
int nreqs = settings.reqs_per_event;
int res;
const char *str;
#ifdef HAVE_ACCEPT4
static int use_accept4 = 1;
#else
static int use_accept4 = 0;
#endif
assert(c != NULL);
while (!stop) {
switch(c->state) {
case conn_listening:
addrlen = sizeof(addr);
#ifdef HAVE_ACCEPT4
if (use_accept4) {
sfd = accept4(c->sfd, (struct sockaddr *)&addr, &addrlen, SOCK_NONBLOCK);
} else {
sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen);
}
#else
sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen);
#endif
if (sfd == -1) {
if (use_accept4 && errno == ENOSYS) {
use_accept4 = 0;
continue;
}
perror(use_accept4 ? "accept4()" : "accept()");
if (errno == EAGAIN || errno == EWOULDBLOCK) {
/* these are transient, so don't log anything */
stop = true;
} else if (errno == EMFILE) {
if (settings.verbose > 0)
fprintf(stderr, "Too many open connections\n");
accept_new_conns(false);
stop = true;
} else {
perror("accept()");
stop = true;
}
break;
}
if (!use_accept4) {
if (fcntl(sfd, F_SETFL, fcntl(sfd, F_GETFL) | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
break;
}
}
bool reject;
if (settings.maxconns_fast) {
STATS_LOCK();
reject = stats_state.curr_conns + stats_state.reserved_fds >= settings.maxconns - 1;
if (reject) {
stats.rejected_conns++;
}
STATS_UNLOCK();
} else {
reject = false;
}
if (reject) {
str = "ERROR Too many open connections\r\n";
res = write(sfd, str, strlen(str));
close(sfd);
} else {
void *ssl_v = NULL;
#ifdef TLS
SSL *ssl = NULL;
if (c->ssl_enabled) {
assert(IS_TCP(c->transport) && settings.ssl_enabled);
if (settings.ssl_ctx == NULL) {
if (settings.verbose) {
fprintf(stderr, "SSL context is not initialized\n");
}
close(sfd);
break;
}
SSL_LOCK();
ssl = SSL_new(settings.ssl_ctx);
SSL_UNLOCK();
if (ssl == NULL) {
if (settings.verbose) {
fprintf(stderr, "Failed to created the SSL object\n");
}
close(sfd);
break;
}
SSL_set_fd(ssl, sfd);
int ret = SSL_accept(ssl);
if (ret <= 0) {
int err = SSL_get_error(ssl, ret);
if (err == SSL_ERROR_SYSCALL || err == SSL_ERROR_SSL) {
if (settings.verbose) {
fprintf(stderr, "SSL connection failed with error code : %d : %s\n", err, strerror(errno));
}
SSL_free(ssl);
close(sfd);
STATS_LOCK();
stats.ssl_handshake_errors++;
STATS_UNLOCK();
break;
}
}
}
ssl_v = (void*) ssl;
#endif
dispatch_conn_new(sfd, conn_new_cmd, EV_READ | EV_PERSIST,
READ_BUFFER_CACHED, c->transport, ssl_v);
}
stop = true;
break;
case conn_waiting:
rbuf_release(c);
if (!update_event(c, EV_READ | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
conn_set_state(c, conn_read);
stop = true;
break;
case conn_read:
if (!IS_UDP(c->transport)) {
// Assign a read buffer if necessary.
if (!rbuf_alloc(c)) {
// TODO: Some way to allow for temporary failures.
conn_set_state(c, conn_closing);
break;
}
res = try_read_network(c);
} else {
// UDP connections always have a static buffer.
res = try_read_udp(c);
}
switch (res) {
case READ_NO_DATA_RECEIVED:
conn_set_state(c, conn_waiting);
break;
case READ_DATA_RECEIVED:
conn_set_state(c, conn_parse_cmd);
break;
case READ_ERROR:
conn_set_state(c, conn_closing);
break;
case READ_MEMORY_ERROR: /* Failed to allocate more memory */
/* State already set by try_read_network */
break;
}
break;
case conn_parse_cmd:
c->noreply = false;
if (c->try_read_command(c) == 0) {
/* wee need more data! */
if (c->resp_head) {
// Buffered responses waiting, flush in the meantime.
conn_set_state(c, conn_mwrite);
} else {
conn_set_state(c, conn_waiting);
}
}
break;
case conn_new_cmd:
/* Only process nreqs at a time to avoid starving other
connections */
--nreqs;
if (nreqs >= 0) {
reset_cmd_handler(c);
} else if (c->resp_head) {
// flush response pipe on yield.
conn_set_state(c, conn_mwrite);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.conn_yields++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (c->rbytes > 0) {
/* We have already read in data into the input buffer,
so libevent will most likely not signal read events
on the socket (unless more data is available. As a
hack we should just put in a request to write data,
because that should be possible ;-)
*/
if (!update_event(c, EV_WRITE | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
}
stop = true;
}
break;
case conn_nread:
if (c->rlbytes == 0) {
complete_nread(c);
break;
}
/* Check if rbytes < 0, to prevent crash */
if (c->rlbytes < 0) {
if (settings.verbose) {
fprintf(stderr, "Invalid rlbytes to read: len %d\n", c->rlbytes);
}
conn_set_state(c, conn_closing);
break;
}
if ((((item *)c->item)->it_flags & ITEM_CHUNKED) == 0) {
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
int tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
memmove(c->ritem, c->rcurr, tocopy);
c->ritem += tocopy;
c->rlbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
if (c->rlbytes == 0) {
break;
}
}
/* now try reading from the socket */
res = c->read(c, c->ritem, c->rlbytes);
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (c->rcurr == c->ritem) {
c->rcurr += res;
}
c->ritem += res;
c->rlbytes -= res;
break;
}
} else {
res = read_into_chunked_item(c);
if (res > 0)
break;
}
if (res == 0) { /* end of stream */
conn_set_state(c, conn_closing);
break;
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_READ | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
stop = true;
break;
}
/* Memory allocation failure */
if (res == -2) {
out_of_memory(c, "SERVER_ERROR Out of memory during read");
c->sbytes = c->rlbytes;
conn_set_state(c, conn_swallow);
// Ensure this flag gets cleared. It gets killed on conn_new()
// so any conn_closing is fine, calling complete_nread is
// fine. This swallow semms to be the only other case.
c->set_stale = false;
c->mset_res = false;
break;
}
/* otherwise we have a real error, on which we close the connection */
if (settings.verbose > 0) {
fprintf(stderr, "Failed to read, and not due to blocking:\n"
"errno: %d %s \n"
"rcurr=%lx ritem=%lx rbuf=%lx rlbytes=%d rsize=%d\n",
errno, strerror(errno),
(long)c->rcurr, (long)c->ritem, (long)c->rbuf,
(int)c->rlbytes, (int)c->rsize);
}
conn_set_state(c, conn_closing);
break;
case conn_swallow:
/* we are reading sbytes and throwing them away */
if (c->sbytes <= 0) {
conn_set_state(c, conn_new_cmd);
break;
}
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
int tocopy = c->rbytes > c->sbytes ? c->sbytes : c->rbytes;
c->sbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
break;
}
/* now try reading from the socket */
res = c->read(c, c->rbuf, c->rsize > c->sbytes ? c->sbytes : c->rsize);
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
c->sbytes -= res;
break;
}
if (res == 0) { /* end of stream */
conn_set_state(c, conn_closing);
break;
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_READ | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
stop = true;
break;
}
/* otherwise we have a real error, on which we close the connection */
if (settings.verbose > 0)
fprintf(stderr, "Failed to read, and not due to blocking\n");
conn_set_state(c, conn_closing);
break;
case conn_write:
case conn_mwrite:
#ifdef EXTSTORE
/* have side IO's that must process before transmit() can run.
* remove the connection from the worker thread and dispatch the
* IO queue
*/
if (c->io_wrapleft) {
assert(c->io_queued == false);
assert(c->io_wraplist != NULL);
// TODO: create proper state for this condition
conn_set_state(c, conn_watch);
event_del(&c->event);
c->io_queued = true;
extstore_submit(c->thread->storage, &c->io_wraplist->io);
stop = true;
break;
}
#endif
switch (!IS_UDP(c->transport) ? transmit(c) : transmit_udp(c)) {
case TRANSMIT_COMPLETE:
if (c->state == conn_mwrite) {
// Free up IO wraps and any half-uploaded items.
conn_release_items(c);
conn_set_state(c, conn_new_cmd);
if (c->close_after_write) {
conn_set_state(c, conn_closing);
}
} else {
if (settings.verbose > 0)
fprintf(stderr, "Unexpected state %d\n", c->state);
conn_set_state(c, conn_closing);
}
break;
case TRANSMIT_INCOMPLETE:
case TRANSMIT_HARD_ERROR:
break; /* Continue in state machine. */
case TRANSMIT_SOFT_ERROR:
stop = true;
break;
}
break;
case conn_closing:
if (IS_UDP(c->transport))
conn_cleanup(c);
else
conn_close(c);
stop = true;
break;
case conn_closed:
/* This only happens if dormando is an idiot. */
abort();
break;
case conn_watch:
/* We handed off our connection to the logger thread. */
stop = true;
break;
case conn_max_state:
assert(false);
break;
}
}
return;
}
void event_handler(const int fd, const short which, void *arg) {
conn *c;
c = (conn *)arg;
assert(c != NULL);
c->which = which;
/* sanity */
if (fd != c->sfd) {
if (settings.verbose > 0)
fprintf(stderr, "Catastrophic: event fd doesn't match conn fd!\n");
conn_close(c);
return;
}
drive_machine(c);
/* wait for next event */
return;
}
static int new_socket(struct addrinfo *ai) {
int sfd;
int flags;
if ((sfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol)) == -1) {
return -1;
}
if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
return -1;
}
return sfd;
}
/*
* Sets a socket's send buffer size to the maximum allowed by the system.
*/
static void maximize_sndbuf(const int sfd) {
socklen_t intsize = sizeof(int);
int last_good = 0;
int min, max, avg;
int old_size;
/* Start with the default size. */
if (getsockopt(sfd, SOL_SOCKET, SO_SNDBUF, &old_size, &intsize) != 0) {
if (settings.verbose > 0)
perror("getsockopt(SO_SNDBUF)");
return;
}
/* Binary-search for the real maximum. */
min = old_size;
max = MAX_SENDBUF_SIZE;
while (min <= max) {
avg = ((unsigned int)(min + max)) / 2;
if (setsockopt(sfd, SOL_SOCKET, SO_SNDBUF, (void *)&avg, intsize) == 0) {
last_good = avg;
min = avg + 1;
} else {
max = avg - 1;
}
}
if (settings.verbose > 1)
fprintf(stderr, "<%d send buffer was %d, now %d\n", sfd, old_size, last_good);
}
/**
* Create a socket and bind it to a specific port number
* @param interface the interface to bind to
* @param port the port number to bind to
* @param transport the transport protocol (TCP / UDP)
* @param portnumber_file A filepointer to write the port numbers to
* when they are successfully added to the list of ports we
* listen on.
*/
static int server_socket(const char *interface,
int port,
enum network_transport transport,
FILE *portnumber_file, bool ssl_enabled) {
int sfd;
struct linger ling = {0, 0};
struct addrinfo *ai;
struct addrinfo *next;
struct addrinfo hints = { .ai_flags = AI_PASSIVE,
.ai_family = AF_UNSPEC };
char port_buf[NI_MAXSERV];
int error;
int success = 0;
int flags =1;
hints.ai_socktype = IS_UDP(transport) ? SOCK_DGRAM : SOCK_STREAM;
if (port == -1) {
port = 0;
}
snprintf(port_buf, sizeof(port_buf), "%d", port);
error= getaddrinfo(interface, port_buf, &hints, &ai);
if (error != 0) {
if (error != EAI_SYSTEM)
fprintf(stderr, "getaddrinfo(): %s\n", gai_strerror(error));
else
perror("getaddrinfo()");
return 1;
}
for (next= ai; next; next= next->ai_next) {
conn *listen_conn_add;
if ((sfd = new_socket(next)) == -1) {
/* getaddrinfo can return "junk" addresses,
* we make sure at least one works before erroring.
*/
if (errno == EMFILE) {
/* ...unless we're out of fds */
perror("server_socket");
exit(EX_OSERR);
}
continue;
}
#ifdef IPV6_V6ONLY
if (next->ai_family == AF_INET6) {
error = setsockopt(sfd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &flags, sizeof(flags));
if (error != 0) {
perror("setsockopt");
close(sfd);
continue;
}
}
#endif
setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags));
if (IS_UDP(transport)) {
maximize_sndbuf(sfd);
} else {
error = setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags));
if (error != 0)
perror("setsockopt");
error = setsockopt(sfd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling));
if (error != 0)
perror("setsockopt");
error = setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (void *)&flags, sizeof(flags));
if (error != 0)
perror("setsockopt");
}
if (bind(sfd, next->ai_addr, next->ai_addrlen) == -1) {
if (errno != EADDRINUSE) {
perror("bind()");
close(sfd);
freeaddrinfo(ai);
return 1;
}
close(sfd);
continue;
} else {
success++;
if (!IS_UDP(transport) && listen(sfd, settings.backlog) == -1) {
perror("listen()");
close(sfd);
freeaddrinfo(ai);
return 1;
}
if (portnumber_file != NULL &&
(next->ai_addr->sa_family == AF_INET ||
next->ai_addr->sa_family == AF_INET6)) {
union {
struct sockaddr_in in;
struct sockaddr_in6 in6;
} my_sockaddr;
socklen_t len = sizeof(my_sockaddr);
if (getsockname(sfd, (struct sockaddr*)&my_sockaddr, &len)==0) {
if (next->ai_addr->sa_family == AF_INET) {
fprintf(portnumber_file, "%s INET: %u\n",
IS_UDP(transport) ? "UDP" : "TCP",
ntohs(my_sockaddr.in.sin_port));
} else {
fprintf(portnumber_file, "%s INET6: %u\n",
IS_UDP(transport) ? "UDP" : "TCP",
ntohs(my_sockaddr.in6.sin6_port));
}
}
}
}
if (IS_UDP(transport)) {
int c;
for (c = 0; c < settings.num_threads_per_udp; c++) {
/* Allocate one UDP file descriptor per worker thread;
* this allows "stats conns" to separately list multiple
* parallel UDP requests in progress.
*
* The dispatch code round-robins new connection requests
* among threads, so this is guaranteed to assign one
* FD to each thread.
*/
int per_thread_fd;
if (c == 0) {
per_thread_fd = sfd;
} else {
per_thread_fd = dup(sfd);
if (per_thread_fd < 0) {
perror("Failed to duplicate file descriptor");
exit(EXIT_FAILURE);
}
}
dispatch_conn_new(per_thread_fd, conn_read,
EV_READ | EV_PERSIST,
UDP_READ_BUFFER_SIZE, transport, NULL);
}
} else {
if (!(listen_conn_add = conn_new(sfd, conn_listening,
EV_READ | EV_PERSIST, 1,
transport, main_base, NULL))) {
fprintf(stderr, "failed to create listening connection\n");
exit(EXIT_FAILURE);
}
#ifdef TLS
listen_conn_add->ssl_enabled = ssl_enabled;
#else
assert(ssl_enabled == false);
#endif
listen_conn_add->next = listen_conn;
listen_conn = listen_conn_add;
}
}
freeaddrinfo(ai);
/* Return zero iff we detected no errors in starting up connections */
return success == 0;
}
static int server_sockets(int port, enum network_transport transport,
FILE *portnumber_file) {
bool ssl_enabled = false;
#ifdef TLS
const char *notls = "notls";
ssl_enabled = settings.ssl_enabled;
#endif
if (settings.inter == NULL) {
return server_socket(settings.inter, port, transport, portnumber_file, ssl_enabled);
} else {
// tokenize them and bind to each one of them..
char *b;
int ret = 0;
char *list = strdup(settings.inter);
if (list == NULL) {
fprintf(stderr, "Failed to allocate memory for parsing server interface string\n");
return 1;
}
for (char *p = strtok_r(list, ";,", &b);
p != NULL;
p = strtok_r(NULL, ";,", &b)) {
int the_port = port;
#ifdef TLS
ssl_enabled = settings.ssl_enabled;
// "notls" option is valid only when memcached is run with SSL enabled.
if (strncmp(p, notls, strlen(notls)) == 0) {
if (!settings.ssl_enabled) {
fprintf(stderr, "'notls' option is valid only when SSL is enabled\n");
free(list);
return 1;
}
ssl_enabled = false;
p += strlen(notls) + 1;
}
#endif
char *h = NULL;
if (*p == '[') {
// expecting it to be an IPv6 address enclosed in []
// i.e. RFC3986 style recommended by RFC5952
char *e = strchr(p, ']');
if (e == NULL) {
fprintf(stderr, "Invalid IPV6 address: \"%s\"", p);
free(list);
return 1;
}
h = ++p; // skip the opening '['
*e = '\0';
p = ++e; // skip the closing ']'
}
char *s = strchr(p, ':');
if (s != NULL) {
// If no more semicolons - attempt to treat as port number.
// Otherwise the only valid option is an unenclosed IPv6 without port, until
// of course there was an RFC3986 IPv6 address previously specified -
// in such a case there is no good option, will just send it to fail as port number.
if (strchr(s + 1, ':') == NULL || h != NULL) {
*s = '\0';
++s;
if (!safe_strtol(s, &the_port)) {
fprintf(stderr, "Invalid port number: \"%s\"", s);
free(list);
return 1;
}
}
}
if (h != NULL)
p = h;
if (strcmp(p, "*") == 0) {
p = NULL;
}
ret |= server_socket(p, the_port, transport, portnumber_file, ssl_enabled);
}
free(list);
return ret;
}
}
static int new_socket_unix(void) {
int sfd;
int flags;
if ((sfd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
perror("socket()");
return -1;
}
if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
return -1;
}
return sfd;
}
static int server_socket_unix(const char *path, int access_mask) {
int sfd;
struct linger ling = {0, 0};
struct sockaddr_un addr;
struct stat tstat;
int flags =1;
int old_umask;
if (!path) {
return 1;
}
if ((sfd = new_socket_unix()) == -1) {
return 1;
}
/*
* Clean up a previous socket file if we left it around
*/
if (lstat(path, &tstat) == 0) {
if (S_ISSOCK(tstat.st_mode))
unlink(path);
}
setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags));
setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags));
setsockopt(sfd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling));
/*
* the memset call clears nonstandard fields in some implementations
* that otherwise mess things up.
*/
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
assert(strcmp(addr.sun_path, path) == 0);
old_umask = umask( ~(access_mask&0777));
if (bind(sfd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
perror("bind()");
close(sfd);
umask(old_umask);
return 1;
}
umask(old_umask);
if (listen(sfd, settings.backlog) == -1) {
perror("listen()");
close(sfd);
return 1;
}
if (!(listen_conn = conn_new(sfd, conn_listening,
EV_READ | EV_PERSIST, 1,
local_transport, main_base, NULL))) {
fprintf(stderr, "failed to create listening connection\n");
exit(EXIT_FAILURE);
}
return 0;
}
/*
* We keep the current time of day in a global variable that's updated by a
* timer event. This saves us a bunch of time() system calls (we really only
* need to get the time once a second, whereas there can be tens of thousands
* of requests a second) and allows us to use server-start-relative timestamps
* rather than absolute UNIX timestamps, a space savings on systems where
* sizeof(time_t) > sizeof(unsigned int).
*/
volatile rel_time_t current_time;
static struct event clockevent;
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
static bool monotonic = false;
static int64_t monotonic_start;
#endif
/* libevent uses a monotonic clock when available for event scheduling. Aside
* from jitter, simply ticking our internal timer here is accurate enough.
* Note that users who are setting explicit dates for expiration times *must*
* ensure their clocks are correct before starting memcached. */
static void clock_handler(const int fd, const short which, void *arg) {
struct timeval t = {.tv_sec = 1, .tv_usec = 0};
static bool initialized = false;
if (initialized) {
/* only delete the event if it's actually there. */
evtimer_del(&clockevent);
} else {
initialized = true;
}
// While we're here, check for hash table expansion.
// This function should be quick to avoid delaying the timer.
assoc_start_expand(stats_state.curr_items);
// also, if HUP'ed we need to do some maintenance.
// for now that's just the authfile reload.
if (settings.sig_hup) {
settings.sig_hup = false;
authfile_load(settings.auth_file);
}
evtimer_set(&clockevent, clock_handler, 0);
event_base_set(main_base, &clockevent);
evtimer_add(&clockevent, &t);
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
if (monotonic) {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
return;
current_time = (rel_time_t) (ts.tv_sec - monotonic_start);
return;
}
#endif
{
struct timeval tv;
gettimeofday(&tv, NULL);
current_time = (rel_time_t) (tv.tv_sec - process_started);
}
}
static const char* flag_enabled_disabled(bool flag) {
return (flag ? "enabled" : "disabled");
}
static void verify_default(const char* param, bool condition) {
if (!condition) {
printf("Default value of [%s] has changed."
" Modify the help text and default value check.\n", param);
exit(EXIT_FAILURE);
}
}
static void usage(void) {
printf(PACKAGE " " VERSION "\n");
printf("-p, --port=<num> TCP port to listen on (default: %d)\n"
"-U, --udp-port=<num> UDP port to listen on (default: %d, off)\n"
"-s, --unix-socket=<file> UNIX socket to listen on (disables network support)\n"
"-A, --enable-shutdown enable ascii \"shutdown\" command\n"
"-a, --unix-mask=<mask> access mask for UNIX socket, in octal (default: %o)\n"
"-l, --listen=<addr> interface to listen on (default: INADDR_ANY)\n"
#ifdef TLS
" if TLS/SSL is enabled, 'notls' prefix can be used to\n"
" disable for specific listeners (-l notls:<ip>:<port>) \n"
#endif
"-d, --daemon run as a daemon\n"
"-r, --enable-coredumps maximize core file limit\n"
"-u, --user=<user> assume identity of <username> (only when run as root)\n"
"-m, --memory-limit=<num> item memory in megabytes (default: %lu)\n"
"-M, --disable-evictions return error on memory exhausted instead of evicting\n"
"-c, --conn-limit=<num> max simultaneous connections (default: %d)\n"
"-k, --lock-memory lock down all paged memory\n"
"-v, --verbose verbose (print errors/warnings while in event loop)\n"
"-vv very verbose (also print client commands/responses)\n"
"-vvv extremely verbose (internal state transitions)\n"
"-h, --help print this help and exit\n"
"-i, --license print memcached and libevent license\n"
"-V, --version print version and exit\n"
"-P, --pidfile=<file> save PID in <file>, only used with -d option\n"
"-f, --slab-growth-factor=<num> chunk size growth factor (default: %2.2f)\n"
"-n, --slab-min-size=<bytes> min space used for key+value+flags (default: %d)\n",
settings.port, settings.udpport, settings.access, (unsigned long) settings.maxbytes / (1 << 20),
settings.maxconns, settings.factor, settings.chunk_size);
verify_default("udp-port",settings.udpport == 0);
printf("-L, --enable-largepages try to use large memory pages (if available)\n");
printf("-D <char> Use <char> as the delimiter between key prefixes and IDs.\n"
" This is used for per-prefix stats reporting. The default is\n"
" \"%c\" (colon). If this option is specified, stats collection\n"
" is turned on automatically; if not, then it may be turned on\n"
" by sending the \"stats detail on\" command to the server.\n",
settings.prefix_delimiter);
printf("-t, --threads=<num> number of threads to use (default: %d)\n", settings.num_threads);
printf("-R, --max-reqs-per-event maximum number of requests per event, limits the\n"
" requests processed per connection to prevent \n"
" starvation (default: %d)\n", settings.reqs_per_event);
printf("-C, --disable-cas disable use of CAS\n");
printf("-b, --listen-backlog=<num> set the backlog queue limit (default: %d)\n", settings.backlog);
printf("-B, --protocol=<name> protocol - one of ascii, binary, or auto (default: %s)\n",
prot_text(settings.binding_protocol));
printf("-I, --max-item-size=<num> adjusts max item size\n"
" (default: %dm, min: %dk, max: %dm)\n",
settings.item_size_max/ (1 << 20), ITEM_SIZE_MAX_LOWER_LIMIT / (1 << 10), ITEM_SIZE_MAX_UPPER_LIMIT / (1 << 20));
#ifdef ENABLE_SASL
printf("-S, --enable-sasl turn on Sasl authentication\n");
#endif
printf("-F, --disable-flush-all disable flush_all command\n");
printf("-X, --disable-dumping disable stats cachedump and lru_crawler metadump\n");
printf("-W --disable-watch disable watch commands (live logging)\n");
printf("-Y, --auth-file=<file> (EXPERIMENTAL) enable ASCII protocol authentication. format:\n"
" user:pass\\nuser2:pass2\\n\n");
printf("-e, --memory-file=<file> (EXPERIMENTAL) mmap a file for item memory.\n"
" use only in ram disks or persistent memory mounts!\n"
" enables restartable cache (stop with SIGUSR1)\n");
#ifdef TLS
printf("-Z, --enable-ssl enable TLS/SSL\n");
#endif
printf("-o, --extended comma separated list of extended options\n"
" most options have a 'no_' prefix to disable\n"
" - maxconns_fast: immediately close new connections after limit (default: %s)\n"
" - hashpower: an integer multiplier for how large the hash\n"
" table should be. normally grows at runtime. (default starts at: %d)\n"
" set based on \"STAT hash_power_level\"\n"
" - tail_repair_time: time in seconds for how long to wait before\n"
" forcefully killing LRU tail item.\n"
" disabled by default; very dangerous option.\n"
" - hash_algorithm: the hash table algorithm\n"
" default is murmur3 hash. options: jenkins, murmur3\n"
" - no_lru_crawler: disable LRU Crawler background thread.\n"
" - lru_crawler_sleep: microseconds to sleep between items\n"
" default is %d.\n"
" - lru_crawler_tocrawl: max items to crawl per slab per run\n"
" default is %u (unlimited)\n",
flag_enabled_disabled(settings.maxconns_fast), settings.hashpower_init,
settings.lru_crawler_sleep, settings.lru_crawler_tocrawl);
printf(" - resp_obj_mem_limit: limit in megabytes for connection response objects.\n"
" do not adjust unless you have high (100k+) conn. limits.\n"
" 0 means unlimited (default: %u)\n"
" - read_buf_mem_limit: limit in megabytes for connection read buffers.\n"
" do not adjust unless you have high (100k+) conn. limits.\n"
" 0 means unlimited (default: %u)\n",
settings.resp_obj_mem_limit,
settings.read_buf_mem_limit);
verify_default("resp_obj_mem_limit", settings.resp_obj_mem_limit == 0);
verify_default("read_buf_mem_limit", settings.read_buf_mem_limit == 0);
printf(" - no_lru_maintainer: disable new LRU system + background thread.\n"
" - hot_lru_pct: pct of slab memory to reserve for hot lru.\n"
" (requires lru_maintainer, default pct: %d)\n"
" - warm_lru_pct: pct of slab memory to reserve for warm lru.\n"
" (requires lru_maintainer, default pct: %d)\n"
" - hot_max_factor: items idle > cold lru age * drop from hot lru. (default: %.2f)\n"
" - warm_max_factor: items idle > cold lru age * this drop from warm. (default: %.2f)\n"
" - temporary_ttl: TTL's below get separate LRU, can't be evicted.\n"
" (requires lru_maintainer, default: %d)\n"
" - idle_timeout: timeout for idle connections. (default: %d, no timeout)\n",
settings.hot_lru_pct, settings.warm_lru_pct, settings.hot_max_factor, settings.warm_max_factor,
settings.temporary_ttl, settings.idle_timeout);
printf(" - slab_chunk_max: (EXPERIMENTAL) maximum slab size in kilobytes. use extreme care. (default: %d)\n"
" - watcher_logbuf_size: size in kilobytes of per-watcher write buffer. (default: %u)\n"
" - worker_logbuf_size: size in kilobytes of per-worker-thread buffer\n"
" read by background thread, then written to watchers. (default: %u)\n"
" - track_sizes: enable dynamic reports for 'stats sizes' command.\n"
" - no_hashexpand: disables hash table expansion (dangerous)\n"
" - modern: enables options which will be default in future.\n"
" currently: nothing\n"
" - no_modern: uses defaults of previous major version (1.4.x)\n",
settings.slab_chunk_size_max / (1 << 10), settings.logger_watcher_buf_size / (1 << 10),
settings.logger_buf_size / (1 << 10));
verify_default("tail_repair_time", settings.tail_repair_time == TAIL_REPAIR_TIME_DEFAULT);
verify_default("lru_crawler_tocrawl", settings.lru_crawler_tocrawl == 0);
verify_default("idle_timeout", settings.idle_timeout == 0);
#ifdef HAVE_DROP_PRIVILEGES
printf(" - drop_privileges: enable dropping extra syscall privileges\n"
" - no_drop_privileges: disable drop_privileges in case it causes issues with\n"
" some customisation.\n"
" (default is no_drop_privileges)\n");
verify_default("drop_privileges", !settings.drop_privileges);
#ifdef MEMCACHED_DEBUG
printf(" - relaxed_privileges: running tests requires extra privileges. (default: %s)\n",
flag_enabled_disabled(settings.relaxed_privileges));
#endif
#endif
#ifdef EXTSTORE
printf("\n - External storage (ext_*) related options (see: https://memcached.org/extstore)\n");
printf(" - ext_path: file to write to for external storage.\n"
" ie: ext_path=/mnt/d1/extstore:1G\n"
" - ext_page_size: size in megabytes of storage pages. (default: %u)\n"
" - ext_wbuf_size: size in megabytes of page write buffers. (default: %u)\n"
" - ext_threads: number of IO threads to run. (default: %u)\n"
" - ext_item_size: store items larger than this (bytes, default %u)\n"
" - ext_item_age: store items idle at least this long (seconds, default: no age limit)\n"
" - ext_low_ttl: consider TTLs lower than this specially (default: %u)\n"
" - ext_drop_unread: don't re-write unread values during compaction (default: %s)\n"
" - ext_recache_rate: recache an item every N accesses (default: %u)\n"
" - ext_compact_under: compact when fewer than this many free pages\n"
" (default: 1/4th of the assigned storage)\n"
" - ext_drop_under: drop COLD items when fewer than this many free pages\n"
" (default: 1/4th of the assigned storage)\n"
" - ext_max_frag: max page fragmentation to tolerate (default: %.2f)\n"
" - slab_automove_freeratio: ratio of memory to hold free as buffer.\n"
" (see doc/storage.txt for more info, default: %.3f)\n",
settings.ext_page_size / (1 << 20), settings.ext_wbuf_size / (1 << 20), settings.ext_io_threadcount,
settings.ext_item_size, settings.ext_low_ttl,
flag_enabled_disabled(settings.ext_drop_unread), settings.ext_recache_rate,
settings.ext_max_frag, settings.slab_automove_freeratio);
verify_default("ext_item_age", settings.ext_item_age == UINT_MAX);
#endif
#ifdef TLS
printf(" - ssl_chain_cert: certificate chain file in PEM format\n"
" - ssl_key: private key, if not part of the -ssl_chain_cert\n"
" - ssl_keyformat: private key format (PEM, DER or ENGINE) (default: PEM)\n");
printf(" - ssl_verify_mode: peer certificate verification mode, default is 0(None).\n"
" valid values are 0(None), 1(Request), 2(Require)\n"
" or 3(Once)\n");
printf(" - ssl_ciphers: specify cipher list to be used\n"
" - ssl_ca_cert: PEM format file of acceptable client CA's\n"
" - ssl_wbuf_size: size in kilobytes of per-connection SSL output buffer\n"
" (default: %u)\n", settings.ssl_wbuf_size / (1 << 10));
verify_default("ssl_keyformat", settings.ssl_keyformat == SSL_FILETYPE_PEM);
verify_default("ssl_verify_mode", settings.ssl_verify_mode == SSL_VERIFY_NONE);
#endif
return;
}
static void usage_license(void) {
printf(PACKAGE " " VERSION "\n\n");
printf(
"Copyright (c) 2003, Danga Interactive, Inc. <http://www.danga.com/>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are\n"
"met:\n"
"\n"
" * Redistributions of source code must retain the above copyright\n"
"notice, this list of conditions and the following disclaimer.\n"
"\n"
" * Redistributions in binary form must reproduce the above\n"
"copyright notice, this list of conditions and the following disclaimer\n"
"in the documentation and/or other materials provided with the\n"
"distribution.\n"
"\n"
" * Neither the name of the Danga Interactive nor the names of its\n"
"contributors may be used to endorse or promote products derived from\n"
"this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n"
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n"
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n"
"OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n"
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n"
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n"
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
"\n"
"\n"
"This product includes software developed by Niels Provos.\n"
"\n"
"[ libevent ]\n"
"\n"
"Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions\n"
"are met:\n"
"1. Redistributions of source code must retain the above copyright\n"
" notice, this list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright\n"
" notice, this list of conditions and the following disclaimer in the\n"
" documentation and/or other materials provided with the distribution.\n"
"3. All advertising materials mentioning features or use of this software\n"
" must display the following acknowledgement:\n"
" This product includes software developed by Niels Provos.\n"
"4. The name of the author may not be used to endorse or promote products\n"
" derived from this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n"
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n"
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n"
"IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n"
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n"
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n"
"THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
);
return;
}
static void save_pid(const char *pid_file) {
FILE *fp;
if (access(pid_file, F_OK) == 0) {
if ((fp = fopen(pid_file, "r")) != NULL) {
char buffer[1024];
if (fgets(buffer, sizeof(buffer), fp) != NULL) {
unsigned int pid;
if (safe_strtoul(buffer, &pid) && kill((pid_t)pid, 0) == 0) {
fprintf(stderr, "WARNING: The pid file contained the following (running) pid: %u\n", pid);
}
}
fclose(fp);
}
}
/* Create the pid file first with a temporary name, then
* atomically move the file to the real name to avoid a race with
* another process opening the file to read the pid, but finding
* it empty.
*/
char tmp_pid_file[1024];
snprintf(tmp_pid_file, sizeof(tmp_pid_file), "%s.tmp", pid_file);
if ((fp = fopen(tmp_pid_file, "w")) == NULL) {
vperror("Could not open the pid file %s for writing", tmp_pid_file);
return;
}
fprintf(fp,"%ld\n", (long)getpid());
if (fclose(fp) == -1) {
vperror("Could not close the pid file %s", tmp_pid_file);
}
if (rename(tmp_pid_file, pid_file) != 0) {
vperror("Could not rename the pid file from %s to %s",
tmp_pid_file, pid_file);
}
}
static void remove_pidfile(const char *pid_file) {
if (pid_file == NULL)
return;
if (unlink(pid_file) != 0) {
vperror("Could not remove the pid file %s", pid_file);
}
}
static void sig_handler(const int sig) {
printf("Signal handled: %s.\n", strsignal(sig));
exit(EXIT_SUCCESS);
}
static void sighup_handler(const int sig) {
settings.sig_hup = true;
}
static void sig_usrhandler(const int sig) {
printf("Graceful shutdown signal handled: %s.\n", strsignal(sig));
stop_main_loop = true;
}
#ifndef HAVE_SIGIGNORE
static int sigignore(int sig) {
struct sigaction sa = { .sa_handler = SIG_IGN, .sa_flags = 0 };
if (sigemptyset(&sa.sa_mask) == -1 || sigaction(sig, &sa, 0) == -1) {
return -1;
}
return 0;
}
#endif
/*
* On systems that supports multiple page sizes we may reduce the
* number of TLB-misses by using the biggest available page size
*/
static int enable_large_pages(void) {
#if defined(HAVE_GETPAGESIZES) && defined(HAVE_MEMCNTL)
int ret = -1;
size_t sizes[32];
int avail = getpagesizes(sizes, 32);
if (avail != -1) {
size_t max = sizes[0];
struct memcntl_mha arg = {0};
int ii;
for (ii = 1; ii < avail; ++ii) {
if (max < sizes[ii]) {
max = sizes[ii];
}
}
arg.mha_flags = 0;
arg.mha_pagesize = max;
arg.mha_cmd = MHA_MAPSIZE_BSSBRK;
if (memcntl(0, 0, MC_HAT_ADVISE, (caddr_t)&arg, 0, 0) == -1) {
fprintf(stderr, "Failed to set large pages: %s\n",
strerror(errno));
fprintf(stderr, "Will use default page size\n");
} else {
ret = 0;
}
} else {
fprintf(stderr, "Failed to get supported pagesizes: %s\n",
strerror(errno));
fprintf(stderr, "Will use default page size\n");
}
return ret;
#elif defined(__linux__) && defined(MADV_HUGEPAGE)
/* check if transparent hugepages is compiled into the kernel */
struct stat st;
int ret = stat("/sys/kernel/mm/transparent_hugepage/enabled", &st);
if (ret || !(st.st_mode & S_IFREG)) {
fprintf(stderr, "Transparent huge pages support not detected.\n");
fprintf(stderr, "Will use default page size.\n");
return -1;
}
return 0;
#elif defined(__FreeBSD__)
int spages;
size_t spagesl = sizeof(spages);
if (sysctlbyname("vm.pmap.pg_ps_enabled", &spages,
&spagesl, NULL, 0) != 0) {
fprintf(stderr, "Could not evaluate the presence of superpages features.");
return -1;
}
if (spages != 1) {
fprintf(stderr, "Superpages support not detected.\n");
fprintf(stderr, "Will use default page size.\n");
return -1;
}
return 0;
#else
return -1;
#endif
}
/**
* Do basic sanity check of the runtime environment
* @return true if no errors found, false if we can't use this env
*/
static bool sanitycheck(void) {
/* One of our biggest problems is old and bogus libevents */
const char *ever = event_get_version();
if (ever != NULL) {
if (strncmp(ever, "1.", 2) == 0) {
/* Require at least 1.3 (that's still a couple of years old) */
if (('0' <= ever[2] && ever[2] < '3') && !isdigit(ever[3])) {
fprintf(stderr, "You are using libevent %s.\nPlease upgrade to"
" a more recent version (1.3 or newer)\n",
event_get_version());
return false;
}
}
}
return true;
}
static bool _parse_slab_sizes(char *s, uint32_t *slab_sizes) {
char *b = NULL;
uint32_t size = 0;
int i = 0;
uint32_t last_size = 0;
if (strlen(s) < 1)
return false;
for (char *p = strtok_r(s, "-", &b);
p != NULL;
p = strtok_r(NULL, "-", &b)) {
if (!safe_strtoul(p, &size) || size < settings.chunk_size
|| size > settings.slab_chunk_size_max) {
fprintf(stderr, "slab size %u is out of valid range\n", size);
return false;
}
if (last_size >= size) {
fprintf(stderr, "slab size %u cannot be lower than or equal to a previous class size\n", size);
return false;
}
if (size <= last_size + CHUNK_ALIGN_BYTES) {
fprintf(stderr, "slab size %u must be at least %d bytes larger than previous class\n",
size, CHUNK_ALIGN_BYTES);
return false;
}
slab_sizes[i++] = size;
last_size = size;
if (i >= MAX_NUMBER_OF_SLAB_CLASSES-1) {
fprintf(stderr, "too many slab classes specified\n");
return false;
}
}
slab_sizes[i] = 0;
return true;
}
struct _mc_meta_data {
void *mmap_base;
uint64_t old_base;
char *slab_config; // string containing either factor or custom slab list.
int64_t time_delta;
uint64_t process_started;
uint32_t current_time;
};
// We need to remember a combination of configuration settings and global
// state for restart viability and resumption of internal services.
// Compared to the number of tunables and state values, relatively little
// does need to be remembered.
// Time is the hardest; we have to assume the sys clock is correct and re-sync for
// the lost time after restart.
static int _mc_meta_save_cb(const char *tag, void *ctx, void *data) {
struct _mc_meta_data *meta = (struct _mc_meta_data *)data;
// Settings to remember.
// TODO: should get a version of version which is numeric, else
// comparisons for compat reasons are difficult.
// it may be possible to punt on this for now; since we can test for the
// absense of another key... such as the new numeric version.
//restart_set_kv(ctx, "version", "%s", VERSION);
// We hold the original factor or subopts _string_
// it can be directly compared without roundtripping through floats or
// serializing/deserializing the long options list.
restart_set_kv(ctx, "slab_config", "%s", meta->slab_config);
restart_set_kv(ctx, "maxbytes", "%llu", (unsigned long long) settings.maxbytes);
restart_set_kv(ctx, "chunk_size", "%d", settings.chunk_size);
restart_set_kv(ctx, "item_size_max", "%d", settings.item_size_max);
restart_set_kv(ctx, "slab_chunk_size_max", "%d", settings.slab_chunk_size_max);
restart_set_kv(ctx, "slab_page_size", "%d", settings.slab_page_size);
restart_set_kv(ctx, "use_cas", "%s", settings.use_cas ? "true" : "false");
restart_set_kv(ctx, "slab_reassign", "%s", settings.slab_reassign ? "true" : "false");
// Online state to remember.
// current time is tough. we need to rely on the clock being correct to
// pull the delta between stop and start times. we also need to know the
// delta between start time and now to restore monotonic clocks.
// for non-monotonic clocks (some OS?), process_started is the only
// important one.
restart_set_kv(ctx, "current_time", "%u", current_time);
// types are great until... this. some systems time_t could be big, but
// I'm assuming never negative.
restart_set_kv(ctx, "process_started", "%llu", (unsigned long long) process_started);
{
struct timeval tv;
gettimeofday(&tv, NULL);
restart_set_kv(ctx, "stop_time", "%lu", tv.tv_sec);
}
// Might as well just fetch the next CAS value to use than tightly
// coupling the internal variable into the restart system.
restart_set_kv(ctx, "current_cas", "%llu", (unsigned long long) get_cas_id());
restart_set_kv(ctx, "oldest_cas", "%llu", (unsigned long long) settings.oldest_cas);
restart_set_kv(ctx, "logger_gid", "%llu", logger_get_gid());
restart_set_kv(ctx, "hashpower", "%u", stats_state.hash_power_level);
// NOTE: oldest_live is a rel_time_t, which aliases for unsigned int.
// should future proof this with a 64bit upcast, or fetch value from a
// converter function/macro?
restart_set_kv(ctx, "oldest_live", "%u", settings.oldest_live);
// TODO: use uintptr_t etc? is it portable enough?
restart_set_kv(ctx, "mmap_oldbase", "%p", meta->mmap_base);
return 0;
}
// We must see at least this number of checked lines. Else empty/missing lines
// could cause a false-positive.
// TODO: Once crc32'ing of the metadata file is done this could be ensured better by
// the restart module itself (crc32 + count of lines must match on the
// backend)
#define RESTART_REQUIRED_META 17
// With this callback we make a decision on if the current configuration
// matches up enough to allow reusing the cache.
// We also re-load important runtime information.
static int _mc_meta_load_cb(const char *tag, void *ctx, void *data) {
struct _mc_meta_data *meta = (struct _mc_meta_data *)data;
char *key;
char *val;
int reuse_mmap = 0;
meta->process_started = 0;
meta->time_delta = 0;
meta->current_time = 0;
int lines_seen = 0;
// TODO: not sure this is any better than just doing an if/else tree with
// strcmp's...
enum {
R_MMAP_OLDBASE = 0,
R_MAXBYTES,
R_CHUNK_SIZE,
R_ITEM_SIZE_MAX,
R_SLAB_CHUNK_SIZE_MAX,
R_SLAB_PAGE_SIZE,
R_SLAB_CONFIG,
R_USE_CAS,
R_SLAB_REASSIGN,
R_CURRENT_CAS,
R_OLDEST_CAS,
R_OLDEST_LIVE,
R_LOGGER_GID,
R_CURRENT_TIME,
R_STOP_TIME,
R_PROCESS_STARTED,
R_HASHPOWER,
};
const char *opts[] = {
[R_MMAP_OLDBASE] = "mmap_oldbase",
[R_MAXBYTES] = "maxbytes",
[R_CHUNK_SIZE] = "chunk_size",
[R_ITEM_SIZE_MAX] = "item_size_max",
[R_SLAB_CHUNK_SIZE_MAX] = "slab_chunk_size_max",
[R_SLAB_PAGE_SIZE] = "slab_page_size",
[R_SLAB_CONFIG] = "slab_config",
[R_USE_CAS] = "use_cas",
[R_SLAB_REASSIGN] = "slab_reassign",
[R_CURRENT_CAS] = "current_cas",
[R_OLDEST_CAS] = "oldest_cas",
[R_OLDEST_LIVE] = "oldest_live",
[R_LOGGER_GID] = "logger_gid",
[R_CURRENT_TIME] = "current_time",
[R_STOP_TIME] = "stop_time",
[R_PROCESS_STARTED] = "process_started",
[R_HASHPOWER] = "hashpower",
NULL
};
while (restart_get_kv(ctx, &key, &val) == RESTART_OK) {
int type = 0;
int32_t val_int = 0;
uint32_t val_uint = 0;
int64_t bigval_int = 0;
uint64_t bigval_uint = 0;
while (opts[type] != NULL && strcmp(key, opts[type]) != 0) {
type++;
}
if (opts[type] == NULL) {
fprintf(stderr, "[restart] unknown/unhandled key: %s\n", key);
continue;
}
lines_seen++;
// helper for any boolean checkers.
bool val_bool = false;
bool is_bool = true;
if (strcmp(val, "false") == 0) {
val_bool = false;
} else if (strcmp(val, "true") == 0) {
val_bool = true;
} else {
is_bool = false;
}
switch (type) {
case R_MMAP_OLDBASE:
if (!safe_strtoull_hex(val, &meta->old_base)) {
fprintf(stderr, "[restart] failed to parse %s: %s\n", key, val);
reuse_mmap = -1;
}
break;
case R_MAXBYTES:
if (!safe_strtoll(val, &bigval_int) || settings.maxbytes != bigval_int) {
reuse_mmap = -1;
}
break;
case R_CHUNK_SIZE:
if (!safe_strtol(val, &val_int) || settings.chunk_size != val_int) {
reuse_mmap = -1;
}
break;
case R_ITEM_SIZE_MAX:
if (!safe_strtol(val, &val_int) || settings.item_size_max != val_int) {
reuse_mmap = -1;
}
break;
case R_SLAB_CHUNK_SIZE_MAX:
if (!safe_strtol(val, &val_int) || settings.slab_chunk_size_max != val_int) {
reuse_mmap = -1;
}
break;
case R_SLAB_PAGE_SIZE:
if (!safe_strtol(val, &val_int) || settings.slab_page_size != val_int) {
reuse_mmap = -1;
}
break;
case R_SLAB_CONFIG:
if (strcmp(val, meta->slab_config) != 0) {
reuse_mmap = -1;
}
break;
case R_USE_CAS:
if (!is_bool || settings.use_cas != val_bool) {
reuse_mmap = -1;
}
break;
case R_SLAB_REASSIGN:
if (!is_bool || settings.slab_reassign != val_bool) {
reuse_mmap = -1;
}
break;
case R_CURRENT_CAS:
// FIXME: do we need to fail if these values _aren't_ found?
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
set_cas_id(bigval_uint);
}
break;
case R_OLDEST_CAS:
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
settings.oldest_cas = bigval_uint;
}
break;
case R_OLDEST_LIVE:
if (!safe_strtoul(val, &val_uint)) {
reuse_mmap = -1;
} else {
settings.oldest_live = val_uint;
}
break;
case R_LOGGER_GID:
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
logger_set_gid(bigval_uint);
}
break;
case R_PROCESS_STARTED:
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
meta->process_started = bigval_uint;
}
break;
case R_CURRENT_TIME:
if (!safe_strtoul(val, &val_uint)) {
reuse_mmap = -1;
} else {
meta->current_time = val_uint;
}
break;
case R_STOP_TIME:
if (!safe_strtoll(val, &bigval_int)) {
reuse_mmap = -1;
} else {
struct timeval t;
gettimeofday(&t, NULL);
meta->time_delta = t.tv_sec - bigval_int;
// clock has done something crazy.
// there are _lots_ of ways the clock can go wrong here, but
// this is a safe sanity check since there's nothing else we
// can realistically do.
if (meta->time_delta <= 0) {
reuse_mmap = -1;
}
}
break;
case R_HASHPOWER:
if (!safe_strtoul(val, &val_uint)) {
reuse_mmap = -1;
} else {
settings.hashpower_init = val_uint;
}
break;
default:
fprintf(stderr, "[restart] unhandled key: %s\n", key);
}
if (reuse_mmap != 0) {
fprintf(stderr, "[restart] restart incompatible due to setting for [%s] [old value: %s]\n", key, val);
break;
}
}
if (lines_seen < RESTART_REQUIRED_META) {
fprintf(stderr, "[restart] missing some metadata lines\n");
reuse_mmap = -1;
}
return reuse_mmap;
}
int main (int argc, char **argv) {
int c;
bool lock_memory = false;
bool do_daemonize = false;
bool preallocate = false;
int maxcore = 0;
char *username = NULL;
char *pid_file = NULL;
char *memory_file = NULL;
struct passwd *pw;
struct rlimit rlim;
char *buf;
char unit = '\0';
int size_max = 0;
int retval = EXIT_SUCCESS;
bool protocol_specified = false;
bool tcp_specified = false;
bool udp_specified = false;
bool start_lru_maintainer = true;
bool start_lru_crawler = true;
bool start_assoc_maint = true;
enum hashfunc_type hash_type = MURMUR3_HASH;
uint32_t tocrawl;
uint32_t slab_sizes[MAX_NUMBER_OF_SLAB_CLASSES];
bool use_slab_sizes = false;
char *slab_sizes_unparsed = NULL;
bool slab_chunk_size_changed = false;
// struct for restart code. Initialized up here so we can curry
// important settings to save or validate.
struct _mc_meta_data *meta = malloc(sizeof(struct _mc_meta_data));
meta->slab_config = NULL;
#ifdef EXTSTORE
void *storage = NULL;
struct extstore_conf_file *storage_file = NULL;
struct extstore_conf ext_cf;
#endif
char *subopts, *subopts_orig;
char *subopts_value;
enum {
MAXCONNS_FAST = 0,
HASHPOWER_INIT,
NO_HASHEXPAND,
SLAB_REASSIGN,
SLAB_AUTOMOVE,
SLAB_AUTOMOVE_RATIO,
SLAB_AUTOMOVE_WINDOW,
TAIL_REPAIR_TIME,
HASH_ALGORITHM,
LRU_CRAWLER,
LRU_CRAWLER_SLEEP,
LRU_CRAWLER_TOCRAWL,
LRU_MAINTAINER,
HOT_LRU_PCT,
WARM_LRU_PCT,
HOT_MAX_FACTOR,
WARM_MAX_FACTOR,
TEMPORARY_TTL,
IDLE_TIMEOUT,
WATCHER_LOGBUF_SIZE,
WORKER_LOGBUF_SIZE,
SLAB_SIZES,
SLAB_CHUNK_MAX,
TRACK_SIZES,
NO_INLINE_ASCII_RESP,
MODERN,
NO_MODERN,
NO_CHUNKED_ITEMS,
NO_SLAB_REASSIGN,
NO_SLAB_AUTOMOVE,
NO_MAXCONNS_FAST,
INLINE_ASCII_RESP,
NO_LRU_CRAWLER,
NO_LRU_MAINTAINER,
NO_DROP_PRIVILEGES,
DROP_PRIVILEGES,
RESP_OBJ_MEM_LIMIT,
READ_BUF_MEM_LIMIT,
#ifdef TLS
SSL_CERT,
SSL_KEY,
SSL_VERIFY_MODE,
SSL_KEYFORM,
SSL_CIPHERS,
SSL_CA_CERT,
SSL_WBUF_SIZE,
#endif
#ifdef MEMCACHED_DEBUG
RELAXED_PRIVILEGES,
#endif
#ifdef EXTSTORE
EXT_PAGE_SIZE,
EXT_WBUF_SIZE,
EXT_THREADS,
EXT_IO_DEPTH,
EXT_PATH,
EXT_ITEM_SIZE,
EXT_ITEM_AGE,
EXT_LOW_TTL,
EXT_RECACHE_RATE,
EXT_COMPACT_UNDER,
EXT_DROP_UNDER,
EXT_MAX_FRAG,
EXT_DROP_UNREAD,
SLAB_AUTOMOVE_FREERATIO,
#endif
};
char *const subopts_tokens[] = {
[MAXCONNS_FAST] = "maxconns_fast",
[HASHPOWER_INIT] = "hashpower",
[NO_HASHEXPAND] = "no_hashexpand",
[SLAB_REASSIGN] = "slab_reassign",
[SLAB_AUTOMOVE] = "slab_automove",
[SLAB_AUTOMOVE_RATIO] = "slab_automove_ratio",
[SLAB_AUTOMOVE_WINDOW] = "slab_automove_window",
[TAIL_REPAIR_TIME] = "tail_repair_time",
[HASH_ALGORITHM] = "hash_algorithm",
[LRU_CRAWLER] = "lru_crawler",
[LRU_CRAWLER_SLEEP] = "lru_crawler_sleep",
[LRU_CRAWLER_TOCRAWL] = "lru_crawler_tocrawl",
[LRU_MAINTAINER] = "lru_maintainer",
[HOT_LRU_PCT] = "hot_lru_pct",
[WARM_LRU_PCT] = "warm_lru_pct",
[HOT_MAX_FACTOR] = "hot_max_factor",
[WARM_MAX_FACTOR] = "warm_max_factor",
[TEMPORARY_TTL] = "temporary_ttl",
[IDLE_TIMEOUT] = "idle_timeout",
[WATCHER_LOGBUF_SIZE] = "watcher_logbuf_size",
[WORKER_LOGBUF_SIZE] = "worker_logbuf_size",
[SLAB_SIZES] = "slab_sizes",
[SLAB_CHUNK_MAX] = "slab_chunk_max",
[TRACK_SIZES] = "track_sizes",
[NO_INLINE_ASCII_RESP] = "no_inline_ascii_resp",
[MODERN] = "modern",
[NO_MODERN] = "no_modern",
[NO_CHUNKED_ITEMS] = "no_chunked_items",
[NO_SLAB_REASSIGN] = "no_slab_reassign",
[NO_SLAB_AUTOMOVE] = "no_slab_automove",
[NO_MAXCONNS_FAST] = "no_maxconns_fast",
[INLINE_ASCII_RESP] = "inline_ascii_resp",
[NO_LRU_CRAWLER] = "no_lru_crawler",
[NO_LRU_MAINTAINER] = "no_lru_maintainer",
[NO_DROP_PRIVILEGES] = "no_drop_privileges",
[DROP_PRIVILEGES] = "drop_privileges",
[RESP_OBJ_MEM_LIMIT] = "resp_obj_mem_limit",
[READ_BUF_MEM_LIMIT] = "read_buf_mem_limit",
#ifdef TLS
[SSL_CERT] = "ssl_chain_cert",
[SSL_KEY] = "ssl_key",
[SSL_VERIFY_MODE] = "ssl_verify_mode",
[SSL_KEYFORM] = "ssl_keyformat",
[SSL_CIPHERS] = "ssl_ciphers",
[SSL_CA_CERT] = "ssl_ca_cert",
[SSL_WBUF_SIZE] = "ssl_wbuf_size",
#endif
#ifdef MEMCACHED_DEBUG
[RELAXED_PRIVILEGES] = "relaxed_privileges",
#endif
#ifdef EXTSTORE
[EXT_PAGE_SIZE] = "ext_page_size",
[EXT_WBUF_SIZE] = "ext_wbuf_size",
[EXT_THREADS] = "ext_threads",
[EXT_IO_DEPTH] = "ext_io_depth",
[EXT_PATH] = "ext_path",
[EXT_ITEM_SIZE] = "ext_item_size",
[EXT_ITEM_AGE] = "ext_item_age",
[EXT_LOW_TTL] = "ext_low_ttl",
[EXT_RECACHE_RATE] = "ext_recache_rate",
[EXT_COMPACT_UNDER] = "ext_compact_under",
[EXT_DROP_UNDER] = "ext_drop_under",
[EXT_MAX_FRAG] = "ext_max_frag",
[EXT_DROP_UNREAD] = "ext_drop_unread",
[SLAB_AUTOMOVE_FREERATIO] = "slab_automove_freeratio",
#endif
NULL
};
if (!sanitycheck()) {
free(meta);
return EX_OSERR;
}
/* handle SIGINT, SIGTERM */
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
signal(SIGHUP, sighup_handler);
signal(SIGUSR1, sig_usrhandler);
/* init settings */
settings_init();
verify_default("hash_algorithm", hash_type == MURMUR3_HASH);
#ifdef EXTSTORE
settings.ext_item_size = 512;
settings.ext_item_age = UINT_MAX;
settings.ext_low_ttl = 0;
settings.ext_recache_rate = 2000;
settings.ext_max_frag = 0.8;
settings.ext_drop_unread = false;
settings.ext_wbuf_size = 1024 * 1024 * 4;
settings.ext_compact_under = 0;
settings.ext_drop_under = 0;
settings.slab_automove_freeratio = 0.01;
settings.ext_page_size = 1024 * 1024 * 64;
settings.ext_io_threadcount = 1;
ext_cf.page_size = settings.ext_page_size;
ext_cf.wbuf_size = settings.ext_wbuf_size;
ext_cf.io_threadcount = settings.ext_io_threadcount;
ext_cf.io_depth = 1;
ext_cf.page_buckets = 4;
ext_cf.wbuf_count = ext_cf.page_buckets;
#endif
/* Run regardless of initializing it later */
init_lru_maintainer();
/* set stderr non-buffering (for running under, say, daemontools) */
setbuf(stderr, NULL);
char *shortopts =
"a:" /* access mask for unix socket */
"A" /* enable admin shutdown command */
"Z" /* enable SSL */
"p:" /* TCP port number to listen on */
"s:" /* unix socket path to listen on */
"U:" /* UDP port number to listen on */
"m:" /* max memory to use for items in megabytes */
"M" /* return error on memory exhausted */
"c:" /* max simultaneous connections */
"k" /* lock down all paged memory */
"hiV" /* help, licence info, version */
"r" /* maximize core file limit */
"v" /* verbose */
"d" /* daemon mode */
"l:" /* interface to listen on */
"u:" /* user identity to run as */
"P:" /* save PID in file */
"f:" /* factor? */
"n:" /* minimum space allocated for key+value+flags */
"t:" /* threads */
"D:" /* prefix delimiter? */
"L" /* Large memory pages */
"R:" /* max requests per event */
"C" /* Disable use of CAS */
"b:" /* backlog queue limit */
"B:" /* Binding protocol */
"I:" /* Max item size */
"S" /* Sasl ON */
"F" /* Disable flush_all */
"X" /* Disable dump commands */
"W" /* Disable watch commands */
"Y:" /* Enable token auth */
"e:" /* mmap path for external item memory */
"o:" /* Extended generic options */
;
/* process arguments */
#ifdef HAVE_GETOPT_LONG
const struct option longopts[] = {
{"unix-mask", required_argument, 0, 'a'},
{"enable-shutdown", no_argument, 0, 'A'},
{"enable-ssl", no_argument, 0, 'Z'},
{"port", required_argument, 0, 'p'},
{"unix-socket", required_argument, 0, 's'},
{"udp-port", required_argument, 0, 'U'},
{"memory-limit", required_argument, 0, 'm'},
{"disable-evictions", no_argument, 0, 'M'},
{"conn-limit", required_argument, 0, 'c'},
{"lock-memory", no_argument, 0, 'k'},
{"help", no_argument, 0, 'h'},
{"license", no_argument, 0, 'i'},
{"version", no_argument, 0, 'V'},
{"enable-coredumps", no_argument, 0, 'r'},
{"verbose", optional_argument, 0, 'v'},
{"daemon", no_argument, 0, 'd'},
{"listen", required_argument, 0, 'l'},
{"user", required_argument, 0, 'u'},
{"pidfile", required_argument, 0, 'P'},
{"slab-growth-factor", required_argument, 0, 'f'},
{"slab-min-size", required_argument, 0, 'n'},
{"threads", required_argument, 0, 't'},
{"enable-largepages", no_argument, 0, 'L'},
{"max-reqs-per-event", required_argument, 0, 'R'},
{"disable-cas", no_argument, 0, 'C'},
{"listen-backlog", required_argument, 0, 'b'},
{"protocol", required_argument, 0, 'B'},
{"max-item-size", required_argument, 0, 'I'},
{"enable-sasl", no_argument, 0, 'S'},
{"disable-flush-all", no_argument, 0, 'F'},
{"disable-dumping", no_argument, 0, 'X'},
{"disable-watch", no_argument, 0, 'W'},
{"auth-file", required_argument, 0, 'Y'},
{"memory-file", required_argument, 0, 'e'},
{"extended", required_argument, 0, 'o'},
{0, 0, 0, 0}
};
int optindex;
while (-1 != (c = getopt_long(argc, argv, shortopts,
longopts, &optindex))) {
#else
while (-1 != (c = getopt(argc, argv, shortopts))) {
#endif
switch (c) {
case 'A':
/* enables "shutdown" command */
settings.shutdown_command = true;
break;
case 'Z':
/* enable secure communication*/
#ifdef TLS
settings.ssl_enabled = true;
#else
fprintf(stderr, "This server is not built with TLS support.\n");
exit(EX_USAGE);
#endif
break;
case 'a':
/* access for unix domain socket, as octal mask (like chmod)*/
settings.access= strtol(optarg,NULL,8);
break;
case 'U':
settings.udpport = atoi(optarg);
udp_specified = true;
break;
case 'p':
settings.port = atoi(optarg);
tcp_specified = true;
break;
case 's':
settings.socketpath = optarg;
break;
case 'm':
settings.maxbytes = ((size_t)atoi(optarg)) * 1024 * 1024;
break;
case 'M':
settings.evict_to_free = 0;
break;
case 'c':
settings.maxconns = atoi(optarg);
if (settings.maxconns <= 0) {
fprintf(stderr, "Maximum connections must be greater than 0\n");
return 1;
}
break;
case 'h':
usage();
exit(EXIT_SUCCESS);
case 'i':
usage_license();
exit(EXIT_SUCCESS);
case 'V':
printf(PACKAGE " " VERSION "\n");
exit(EXIT_SUCCESS);
case 'k':
lock_memory = true;
break;
case 'v':
settings.verbose++;
break;
case 'l':
if (settings.inter != NULL) {
if (strstr(settings.inter, optarg) != NULL) {
break;
}
size_t len = strlen(settings.inter) + strlen(optarg) + 2;
char *p = malloc(len);
if (p == NULL) {
fprintf(stderr, "Failed to allocate memory\n");
return 1;
}
snprintf(p, len, "%s,%s", settings.inter, optarg);
free(settings.inter);
settings.inter = p;
} else {
settings.inter= strdup(optarg);
}
break;
case 'd':
do_daemonize = true;
break;
case 'r':
maxcore = 1;
break;
case 'R':
settings.reqs_per_event = atoi(optarg);
if (settings.reqs_per_event == 0) {
fprintf(stderr, "Number of requests per event must be greater than 0\n");
return 1;
}
break;
case 'u':
username = optarg;
break;
case 'P':
pid_file = optarg;
break;
case 'e':
memory_file = optarg;
break;
case 'f':
settings.factor = atof(optarg);
if (settings.factor <= 1.0) {
fprintf(stderr, "Factor must be greater than 1\n");
return 1;
}
meta->slab_config = strdup(optarg);
break;
case 'n':
settings.chunk_size = atoi(optarg);
if (settings.chunk_size == 0) {
fprintf(stderr, "Chunk size must be greater than 0\n");
return 1;
}
break;
case 't':
settings.num_threads = atoi(optarg);
if (settings.num_threads <= 0) {
fprintf(stderr, "Number of threads must be greater than 0\n");
return 1;
}
/* There're other problems when you get above 64 threads.
* In the future we should portably detect # of cores for the
* default.
*/
if (settings.num_threads > 64) {
fprintf(stderr, "WARNING: Setting a high number of worker"
"threads is not recommended.\n"
" Set this value to the number of cores in"
" your machine or less.\n");
}
break;
case 'D':
if (! optarg || ! optarg[0]) {
fprintf(stderr, "No delimiter specified\n");
return 1;
}
settings.prefix_delimiter = optarg[0];
settings.detail_enabled = 1;
break;
case 'L' :
if (enable_large_pages() == 0) {
preallocate = true;
} else {
fprintf(stderr, "Cannot enable large pages on this system\n"
"(There is no support as of this version)\n");
return 1;
}
break;
case 'C' :
settings.use_cas = false;
break;
case 'b' :
settings.backlog = atoi(optarg);
break;
case 'B':
protocol_specified = true;
if (strcmp(optarg, "auto") == 0) {
settings.binding_protocol = negotiating_prot;
} else if (strcmp(optarg, "binary") == 0) {
settings.binding_protocol = binary_prot;
} else if (strcmp(optarg, "ascii") == 0) {
settings.binding_protocol = ascii_prot;
} else {
fprintf(stderr, "Invalid value for binding protocol: %s\n"
" -- should be one of auto, binary, or ascii\n", optarg);
exit(EX_USAGE);
}
break;
case 'I':
buf = strdup(optarg);
unit = buf[strlen(buf)-1];
if (unit == 'k' || unit == 'm' ||
unit == 'K' || unit == 'M') {
buf[strlen(buf)-1] = '\0';
size_max = atoi(buf);
if (unit == 'k' || unit == 'K')
size_max *= 1024;
if (unit == 'm' || unit == 'M')
size_max *= 1024 * 1024;
settings.item_size_max = size_max;
} else {
settings.item_size_max = atoi(buf);
}
free(buf);
break;
case 'S': /* set Sasl authentication to true. Default is false */
#ifndef ENABLE_SASL
fprintf(stderr, "This server is not built with SASL support.\n");
exit(EX_USAGE);
#endif
settings.sasl = true;
break;
case 'F' :
settings.flush_enabled = false;
break;
case 'X' :
settings.dump_enabled = false;
break;
case 'W' :
settings.watch_enabled = false;
break;
case 'Y' :
// dupe the file path now just in case the options get mangled.
settings.auth_file = strdup(optarg);
break;
case 'o': /* It's sub-opts time! */
subopts_orig = subopts = strdup(optarg); /* getsubopt() changes the original args */
while (*subopts != '\0') {
switch (getsubopt(&subopts, subopts_tokens, &subopts_value)) {
case MAXCONNS_FAST:
settings.maxconns_fast = true;
break;
case HASHPOWER_INIT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing numeric argument for hashpower\n");
return 1;
}
settings.hashpower_init = atoi(subopts_value);
if (settings.hashpower_init < 12) {
fprintf(stderr, "Initial hashtable multiplier of %d is too low\n",
settings.hashpower_init);
return 1;
} else if (settings.hashpower_init > 32) {
fprintf(stderr, "Initial hashtable multiplier of %d is too high\n"
"Choose a value based on \"STAT hash_power_level\" from a running instance\n",
settings.hashpower_init);
return 1;
}
break;
case NO_HASHEXPAND:
start_assoc_maint = false;
break;
case SLAB_REASSIGN:
settings.slab_reassign = true;
break;
case SLAB_AUTOMOVE:
if (subopts_value == NULL) {
settings.slab_automove = 1;
break;
}
settings.slab_automove = atoi(subopts_value);
if (settings.slab_automove < 0 || settings.slab_automove > 2) {
fprintf(stderr, "slab_automove must be between 0 and 2\n");
return 1;
}
break;
case SLAB_AUTOMOVE_RATIO:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_automove_ratio argument\n");
return 1;
}
settings.slab_automove_ratio = atof(subopts_value);
if (settings.slab_automove_ratio <= 0 || settings.slab_automove_ratio > 1) {
fprintf(stderr, "slab_automove_ratio must be > 0 and < 1\n");
return 1;
}
break;
case SLAB_AUTOMOVE_WINDOW:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_automove_window argument\n");
return 1;
}
settings.slab_automove_window = atoi(subopts_value);
if (settings.slab_automove_window < 3) {
fprintf(stderr, "slab_automove_window must be > 2\n");
return 1;
}
break;
case TAIL_REPAIR_TIME:
if (subopts_value == NULL) {
fprintf(stderr, "Missing numeric argument for tail_repair_time\n");
return 1;
}
settings.tail_repair_time = atoi(subopts_value);
if (settings.tail_repair_time < 10) {
fprintf(stderr, "Cannot set tail_repair_time to less than 10 seconds\n");
return 1;
}
break;
case HASH_ALGORITHM:
if (subopts_value == NULL) {
fprintf(stderr, "Missing hash_algorithm argument\n");
return 1;
};
if (strcmp(subopts_value, "jenkins") == 0) {
hash_type = JENKINS_HASH;
} else if (strcmp(subopts_value, "murmur3") == 0) {
hash_type = MURMUR3_HASH;
} else {
fprintf(stderr, "Unknown hash_algorithm option (jenkins, murmur3)\n");
return 1;
}
break;
case LRU_CRAWLER:
start_lru_crawler = true;
break;
case LRU_CRAWLER_SLEEP:
if (subopts_value == NULL) {
fprintf(stderr, "Missing lru_crawler_sleep value\n");
return 1;
}
settings.lru_crawler_sleep = atoi(subopts_value);
if (settings.lru_crawler_sleep > 1000000 || settings.lru_crawler_sleep < 0) {
fprintf(stderr, "LRU crawler sleep must be between 0 and 1 second\n");
return 1;
}
break;
case LRU_CRAWLER_TOCRAWL:
if (subopts_value == NULL) {
fprintf(stderr, "Missing lru_crawler_tocrawl value\n");
return 1;
}
if (!safe_strtoul(subopts_value, &tocrawl)) {
fprintf(stderr, "lru_crawler_tocrawl takes a numeric 32bit value\n");
return 1;
}
settings.lru_crawler_tocrawl = tocrawl;
break;
case LRU_MAINTAINER:
start_lru_maintainer = true;
settings.lru_segmented = true;
break;
case HOT_LRU_PCT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing hot_lru_pct argument\n");
return 1;
}
settings.hot_lru_pct = atoi(subopts_value);
if (settings.hot_lru_pct < 1 || settings.hot_lru_pct >= 80) {
fprintf(stderr, "hot_lru_pct must be > 1 and < 80\n");
return 1;
}
break;
case WARM_LRU_PCT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing warm_lru_pct argument\n");
return 1;
}
settings.warm_lru_pct = atoi(subopts_value);
if (settings.warm_lru_pct < 1 || settings.warm_lru_pct >= 80) {
fprintf(stderr, "warm_lru_pct must be > 1 and < 80\n");
return 1;
}
break;
case HOT_MAX_FACTOR:
if (subopts_value == NULL) {
fprintf(stderr, "Missing hot_max_factor argument\n");
return 1;
}
settings.hot_max_factor = atof(subopts_value);
if (settings.hot_max_factor <= 0) {
fprintf(stderr, "hot_max_factor must be > 0\n");
return 1;
}
break;
case WARM_MAX_FACTOR:
if (subopts_value == NULL) {
fprintf(stderr, "Missing warm_max_factor argument\n");
return 1;
}
settings.warm_max_factor = atof(subopts_value);
if (settings.warm_max_factor <= 0) {
fprintf(stderr, "warm_max_factor must be > 0\n");
return 1;
}
break;
case TEMPORARY_TTL:
if (subopts_value == NULL) {
fprintf(stderr, "Missing temporary_ttl argument\n");
return 1;
}
settings.temp_lru = true;
settings.temporary_ttl = atoi(subopts_value);
break;
case IDLE_TIMEOUT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing numeric argument for idle_timeout\n");
return 1;
}
settings.idle_timeout = atoi(subopts_value);
break;
case WATCHER_LOGBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing watcher_logbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.logger_watcher_buf_size)) {
fprintf(stderr, "could not parse argument to watcher_logbuf_size\n");
return 1;
}
settings.logger_watcher_buf_size *= 1024; /* kilobytes */
break;
case WORKER_LOGBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing worker_logbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.logger_buf_size)) {
fprintf(stderr, "could not parse argument to worker_logbuf_size\n");
return 1;
}
settings.logger_buf_size *= 1024; /* kilobytes */
case SLAB_SIZES:
slab_sizes_unparsed = strdup(subopts_value);
break;
case SLAB_CHUNK_MAX:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_chunk_max argument\n");
}
if (!safe_strtol(subopts_value, &settings.slab_chunk_size_max)) {
fprintf(stderr, "could not parse argument to slab_chunk_max\n");
}
slab_chunk_size_changed = true;
break;
case TRACK_SIZES:
item_stats_sizes_init();
break;
case NO_INLINE_ASCII_RESP:
break;
case INLINE_ASCII_RESP:
break;
case NO_CHUNKED_ITEMS:
settings.slab_chunk_size_max = settings.slab_page_size;
break;
case NO_SLAB_REASSIGN:
settings.slab_reassign = false;
break;
case NO_SLAB_AUTOMOVE:
settings.slab_automove = 0;
break;
case NO_MAXCONNS_FAST:
settings.maxconns_fast = false;
break;
case NO_LRU_CRAWLER:
settings.lru_crawler = false;
start_lru_crawler = false;
break;
case NO_LRU_MAINTAINER:
start_lru_maintainer = false;
settings.lru_segmented = false;
break;
#ifdef TLS
case SSL_CERT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_chain_cert argument\n");
return 1;
}
settings.ssl_chain_cert = strdup(subopts_value);
break;
case SSL_KEY:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_key argument\n");
return 1;
}
settings.ssl_key = strdup(subopts_value);
break;
case SSL_VERIFY_MODE:
{
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_verify_mode argument\n");
return 1;
}
int verify = 0;
if (!safe_strtol(subopts_value, &verify)) {
fprintf(stderr, "could not parse argument to ssl_verify_mode\n");
return 1;
}
switch(verify) {
case 0:
settings.ssl_verify_mode = SSL_VERIFY_NONE;
break;
case 1:
settings.ssl_verify_mode = SSL_VERIFY_PEER;
break;
case 2:
settings.ssl_verify_mode = SSL_VERIFY_PEER |
SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
break;
case 3:
settings.ssl_verify_mode = SSL_VERIFY_PEER |
SSL_VERIFY_FAIL_IF_NO_PEER_CERT |
SSL_VERIFY_CLIENT_ONCE;
break;
default:
fprintf(stderr, "Invalid ssl_verify_mode. Use help to see valid options.\n");
return 1;
}
break;
}
case SSL_KEYFORM:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_keyformat argument\n");
return 1;
}
if (!safe_strtol(subopts_value, &settings.ssl_keyformat)) {
fprintf(stderr, "could not parse argument to ssl_keyformat\n");
return 1;
}
break;
case SSL_CIPHERS:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_ciphers argument\n");
return 1;
}
settings.ssl_ciphers = strdup(subopts_value);
break;
case SSL_CA_CERT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_ca_cert argument\n");
return 1;
}
settings.ssl_ca_cert = strdup(subopts_value);
break;
case SSL_WBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_wbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ssl_wbuf_size)) {
fprintf(stderr, "could not parse argument to ssl_wbuf_size\n");
return 1;
}
settings.ssl_wbuf_size *= 1024; /* kilobytes */
break;
#endif
#ifdef EXTSTORE
case EXT_PAGE_SIZE:
if (storage_file) {
fprintf(stderr, "Must specify ext_page_size before any ext_path arguments\n");
return 1;
}
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_page_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.page_size)) {
fprintf(stderr, "could not parse argument to ext_page_size\n");
return 1;
}
ext_cf.page_size *= 1024 * 1024; /* megabytes */
break;
case EXT_WBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_wbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.wbuf_size)) {
fprintf(stderr, "could not parse argument to ext_wbuf_size\n");
return 1;
}
ext_cf.wbuf_size *= 1024 * 1024; /* megabytes */
settings.ext_wbuf_size = ext_cf.wbuf_size;
break;
case EXT_THREADS:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_threads argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.io_threadcount)) {
fprintf(stderr, "could not parse argument to ext_threads\n");
return 1;
}
break;
case EXT_IO_DEPTH:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_io_depth argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.io_depth)) {
fprintf(stderr, "could not parse argument to ext_io_depth\n");
return 1;
}
break;
case EXT_ITEM_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_item_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_item_size)) {
fprintf(stderr, "could not parse argument to ext_item_size\n");
return 1;
}
break;
case EXT_ITEM_AGE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_item_age argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_item_age)) {
fprintf(stderr, "could not parse argument to ext_item_age\n");
return 1;
}
break;
case EXT_LOW_TTL:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_low_ttl argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_low_ttl)) {
fprintf(stderr, "could not parse argument to ext_low_ttl\n");
return 1;
}
break;
case EXT_RECACHE_RATE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_recache_rate argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_recache_rate)) {
fprintf(stderr, "could not parse argument to ext_recache_rate\n");
return 1;
}
break;
case EXT_COMPACT_UNDER:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_compact_under argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_compact_under)) {
fprintf(stderr, "could not parse argument to ext_compact_under\n");
return 1;
}
break;
case EXT_DROP_UNDER:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_drop_under argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_drop_under)) {
fprintf(stderr, "could not parse argument to ext_drop_under\n");
return 1;
}
break;
case EXT_MAX_FRAG:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_max_frag argument\n");
return 1;
}
if (!safe_strtod(subopts_value, &settings.ext_max_frag)) {
fprintf(stderr, "could not parse argument to ext_max_frag\n");
return 1;
}
break;
case SLAB_AUTOMOVE_FREERATIO:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_automove_freeratio argument\n");
return 1;
}
if (!safe_strtod(subopts_value, &settings.slab_automove_freeratio)) {
fprintf(stderr, "could not parse argument to slab_automove_freeratio\n");
return 1;
}
break;
case EXT_DROP_UNREAD:
settings.ext_drop_unread = true;
break;
case EXT_PATH:
if (subopts_value) {
struct extstore_conf_file *tmp = storage_conf_parse(subopts_value, ext_cf.page_size);
if (tmp == NULL) {
fprintf(stderr, "failed to parse ext_path argument\n");
return 1;
}
if (storage_file != NULL) {
tmp->next = storage_file;
}
storage_file = tmp;
} else {
fprintf(stderr, "missing argument to ext_path, ie: ext_path=/d/file:5G\n");
return 1;
}
break;
#endif
case MODERN:
/* currently no new defaults */
break;
case NO_MODERN:
if (!slab_chunk_size_changed) {
settings.slab_chunk_size_max = settings.slab_page_size;
}
settings.slab_reassign = false;
settings.slab_automove = 0;
settings.maxconns_fast = false;
settings.lru_segmented = false;
hash_type = JENKINS_HASH;
start_lru_crawler = false;
start_lru_maintainer = false;
break;
case NO_DROP_PRIVILEGES:
settings.drop_privileges = false;
break;
case DROP_PRIVILEGES:
settings.drop_privileges = true;
break;
case RESP_OBJ_MEM_LIMIT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing resp_obj_mem_limit argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.resp_obj_mem_limit)) {
fprintf(stderr, "could not parse argument to resp_obj_mem_limit\n");
return 1;
}
settings.resp_obj_mem_limit *= 1024 * 1024; /* megabytes */
break;
case READ_BUF_MEM_LIMIT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing read_buf_mem_limit argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.read_buf_mem_limit)) {
fprintf(stderr, "could not parse argument to read_buf_mem_limit\n");
return 1;
}
settings.read_buf_mem_limit *= 1024 * 1024; /* megabytes */
break;
#ifdef MEMCACHED_DEBUG
case RELAXED_PRIVILEGES:
settings.relaxed_privileges = true;
break;
#endif
default:
printf("Illegal suboption \"%s\"\n", subopts_value);
return 1;
}
}
free(subopts_orig);
break;
default:
fprintf(stderr, "Illegal argument \"%c\"\n", c);
return 1;
}
}
if (settings.item_size_max < ITEM_SIZE_MAX_LOWER_LIMIT) {
fprintf(stderr, "Item max size cannot be less than 1024 bytes.\n");
exit(EX_USAGE);
}
if (settings.item_size_max > (settings.maxbytes / 2)) {
fprintf(stderr, "Cannot set item size limit higher than 1/2 of memory max.\n");
exit(EX_USAGE);
}
if (settings.item_size_max > (ITEM_SIZE_MAX_UPPER_LIMIT)) {
fprintf(stderr, "Cannot set item size limit higher than a gigabyte.\n");
exit(EX_USAGE);
}
if (settings.item_size_max > 1024 * 1024) {
if (!slab_chunk_size_changed) {
// Ideal new default is 16k, but needs stitching.
settings.slab_chunk_size_max = settings.slab_page_size / 2;
}
}
if (settings.slab_chunk_size_max > settings.item_size_max) {
fprintf(stderr, "slab_chunk_max (bytes: %d) cannot be larger than -I (item_size_max %d)\n",
settings.slab_chunk_size_max, settings.item_size_max);
exit(EX_USAGE);
}
if (settings.item_size_max % settings.slab_chunk_size_max != 0) {
fprintf(stderr, "-I (item_size_max: %d) must be evenly divisible by slab_chunk_max (bytes: %d)\n",
settings.item_size_max, settings.slab_chunk_size_max);
exit(EX_USAGE);
}
if (settings.slab_page_size % settings.slab_chunk_size_max != 0) {
fprintf(stderr, "slab_chunk_max (bytes: %d) must divide evenly into %d (slab_page_size)\n",
settings.slab_chunk_size_max, settings.slab_page_size);
exit(EX_USAGE);
}
#ifdef EXTSTORE
if (storage_file) {
if (settings.item_size_max > ext_cf.wbuf_size) {
fprintf(stderr, "-I (item_size_max: %d) cannot be larger than ext_wbuf_size: %d\n",
settings.item_size_max, ext_cf.wbuf_size);
exit(EX_USAGE);
}
if (settings.udpport) {
fprintf(stderr, "Cannot use UDP with extstore enabled (-U 0 to disable)\n");
exit(EX_USAGE);
}
}
#endif
// Reserve this for the new default. If factor size hasn't changed, use
// new default.
/*if (settings.slab_chunk_size_max == 16384 && settings.factor == 1.25) {
settings.factor = 1.08;
}*/
if (slab_sizes_unparsed != NULL) {
// want the unedited string for restart code.
char *temp = strdup(slab_sizes_unparsed);
if (_parse_slab_sizes(slab_sizes_unparsed, slab_sizes)) {
use_slab_sizes = true;
if (meta->slab_config) {
free(meta->slab_config);
}
meta->slab_config = temp;
} else {
exit(EX_USAGE);
}
} else if (!meta->slab_config) {
// using the default factor.
meta->slab_config = "1.25";
}
if (settings.hot_lru_pct + settings.warm_lru_pct > 80) {
fprintf(stderr, "hot_lru_pct + warm_lru_pct cannot be more than 80%% combined\n");
exit(EX_USAGE);
}
if (settings.temp_lru && !start_lru_maintainer) {
fprintf(stderr, "temporary_ttl requires lru_maintainer to be enabled\n");
exit(EX_USAGE);
}
if (hash_init(hash_type) != 0) {
fprintf(stderr, "Failed to initialize hash_algorithm!\n");
exit(EX_USAGE);
}
/*
* Use one workerthread to serve each UDP port if the user specified
* multiple ports
*/
if (settings.inter != NULL && strchr(settings.inter, ',')) {
settings.num_threads_per_udp = 1;
} else {
settings.num_threads_per_udp = settings.num_threads;
}
if (settings.sasl) {
if (!protocol_specified) {
settings.binding_protocol = binary_prot;
} else {
if (settings.binding_protocol != binary_prot) {
fprintf(stderr, "ERROR: You cannot allow the ASCII protocol while using SASL.\n");
exit(EX_USAGE);
}
}
if (settings.udpport) {
fprintf(stderr, "ERROR: Cannot enable UDP while using binary SASL authentication.\n");
exit(EX_USAGE);
}
}
if (settings.auth_file) {
if (!protocol_specified) {
settings.binding_protocol = ascii_prot;
} else {
if (settings.binding_protocol != ascii_prot) {
fprintf(stderr, "ERROR: You cannot allow the BINARY protocol while using ascii authentication tokens.\n");
exit(EX_USAGE);
}
}
}
if (udp_specified && settings.udpport != 0 && !tcp_specified) {
settings.port = settings.udpport;
}
#ifdef TLS
/*
* Setup SSL if enabled
*/
if (settings.ssl_enabled) {
if (!settings.port) {
fprintf(stderr, "ERROR: You cannot enable SSL without a TCP port.\n");
exit(EX_USAGE);
}
// openssl init methods.
SSL_load_error_strings();
SSLeay_add_ssl_algorithms();
// Initiate the SSL context.
ssl_init();
}
#endif
if (maxcore != 0) {
struct rlimit rlim_new;
/*
* First try raising to infinity; if that fails, try bringing
* the soft limit to the hard.
*/
if (getrlimit(RLIMIT_CORE, &rlim) == 0) {
rlim_new.rlim_cur = rlim_new.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &rlim_new)!= 0) {
/* failed. try raising just to the old max */
rlim_new.rlim_cur = rlim_new.rlim_max = rlim.rlim_max;
(void)setrlimit(RLIMIT_CORE, &rlim_new);
}
}
/*
* getrlimit again to see what we ended up with. Only fail if
* the soft limit ends up 0, because then no core files will be
* created at all.
*/
if ((getrlimit(RLIMIT_CORE, &rlim) != 0) || rlim.rlim_cur == 0) {
fprintf(stderr, "failed to ensure corefile creation\n");
exit(EX_OSERR);
}
}
/*
* If needed, increase rlimits to allow as many connections
* as needed.
*/
if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
fprintf(stderr, "failed to getrlimit number of files\n");
exit(EX_OSERR);
} else {
rlim.rlim_cur = settings.maxconns;
rlim.rlim_max = settings.maxconns;
if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
fprintf(stderr, "failed to set rlimit for open files. Try starting as root or requesting smaller maxconns value.\n");
exit(EX_OSERR);
}
}
/* lose root privileges if we have them */
if (getuid() == 0 || geteuid() == 0) {
if (username == 0 || *username == '\0') {
fprintf(stderr, "can't run as root without the -u switch\n");
exit(EX_USAGE);
}
if ((pw = getpwnam(username)) == 0) {
fprintf(stderr, "can't find the user %s to switch to\n", username);
exit(EX_NOUSER);
}
if (setgroups(0, NULL) < 0) {
/* setgroups may fail with EPERM, indicating we are already in a
* minimally-privileged state. In that case we continue. For all
* other failure codes we exit.
*
* Note that errno is stored here because fprintf may change it.
*/
bool should_exit = errno != EPERM;
fprintf(stderr, "failed to drop supplementary groups: %s\n",
strerror(errno));
if (should_exit) {
exit(EX_OSERR);
}
}
if (setgid(pw->pw_gid) < 0 || setuid(pw->pw_uid) < 0) {
fprintf(stderr, "failed to assume identity of user %s\n", username);
exit(EX_OSERR);
}
}
/* Initialize Sasl if -S was specified */
if (settings.sasl) {
init_sasl();
}
/* daemonize if requested */
/* if we want to ensure our ability to dump core, don't chdir to / */
if (do_daemonize) {
if (sigignore(SIGHUP) == -1) {
perror("Failed to ignore SIGHUP");
}
if (daemonize(maxcore, settings.verbose) == -1) {
fprintf(stderr, "failed to daemon() in order to daemonize\n");
exit(EXIT_FAILURE);
}
}
/* lock paged memory if needed */
if (lock_memory) {
#ifdef HAVE_MLOCKALL
int res = mlockall(MCL_CURRENT | MCL_FUTURE);
if (res != 0) {
fprintf(stderr, "warning: -k invalid, mlockall() failed: %s\n",
strerror(errno));
}
#else
fprintf(stderr, "warning: -k invalid, mlockall() not supported on this platform. proceeding without.\n");
#endif
}
/* initialize main thread libevent instance */
#if defined(LIBEVENT_VERSION_NUMBER) && LIBEVENT_VERSION_NUMBER >= 0x02000101
/* If libevent version is larger/equal to 2.0.2-alpha, use newer version */
struct event_config *ev_config;
ev_config = event_config_new();
event_config_set_flag(ev_config, EVENT_BASE_FLAG_NOLOCK);
main_base = event_base_new_with_config(ev_config);
event_config_free(ev_config);
#else
/* Otherwise, use older API */
main_base = event_init();
#endif
/* Load initial auth file if required */
if (settings.auth_file) {
if (settings.udpport) {
fprintf(stderr, "Cannot use UDP with ascii authentication enabled (-U 0 to disable)\n");
exit(EX_USAGE);
}
switch (authfile_load(settings.auth_file)) {
case AUTHFILE_MISSING: // fall through.
case AUTHFILE_OPENFAIL:
vperror("Could not open authfile [%s] for reading", settings.auth_file);
exit(EXIT_FAILURE);
break;
case AUTHFILE_OOM:
fprintf(stderr, "Out of memory reading password file: %s", settings.auth_file);
exit(EXIT_FAILURE);
break;
case AUTHFILE_MALFORMED:
fprintf(stderr, "Authfile [%s] has a malformed entry. Should be 'user:password'", settings.auth_file);
exit(EXIT_FAILURE);
break;
case AUTHFILE_OK:
break;
}
}
/* initialize other stuff */
stats_init();
logger_init();
conn_init();
bool reuse_mem = false;
void *mem_base = NULL;
bool prefill = false;
if (memory_file != NULL) {
preallocate = true;
// Easier to manage memory if we prefill the global pool when reusing.
prefill = true;
restart_register("main", _mc_meta_load_cb, _mc_meta_save_cb, meta);
reuse_mem = restart_mmap_open(settings.maxbytes,
memory_file,
&mem_base);
// The "save" callback gets called when we're closing out the mmap,
// but we don't know what the mmap_base is until after we call open.
// So we pass the struct above but have to fill it in here so the
// data's available during the save routine.
meta->mmap_base = mem_base;
// Also, the callbacks for load() run before _open returns, so we
// should have the old base in 'meta' as of here.
}
// Initialize the hash table _after_ checking restart metadata.
// We override the hash table start argument with what was live
// previously, to avoid filling a huge set of items into a tiny hash
// table.
assoc_init(settings.hashpower_init);
#ifdef EXTSTORE
if (storage_file && reuse_mem) {
fprintf(stderr, "[restart] memory restart with extstore not presently supported.\n");
reuse_mem = false;
}
#endif
slabs_init(settings.maxbytes, settings.factor, preallocate,
use_slab_sizes ? slab_sizes : NULL, mem_base, reuse_mem);
#ifdef EXTSTORE
if (storage_file) {
enum extstore_res eres;
if (settings.ext_compact_under == 0) {
// If changing the default fraction (4), change the help text as well.
settings.ext_compact_under = storage_file->page_count / 4;
/* Only rescues non-COLD items if below this threshold */
settings.ext_drop_under = storage_file->page_count / 4;
}
crc32c_init();
/* Init free chunks to zero. */
for (int x = 0; x < MAX_NUMBER_OF_SLAB_CLASSES; x++) {
settings.ext_free_memchunks[x] = 0;
}
storage = extstore_init(storage_file, &ext_cf, &eres);
if (storage == NULL) {
fprintf(stderr, "Failed to initialize external storage: %s\n",
extstore_err(eres));
if (eres == EXTSTORE_INIT_OPEN_FAIL) {
perror("extstore open");
}
exit(EXIT_FAILURE);
}
ext_storage = storage;
/* page mover algorithm for extstore needs memory prefilled */
prefill = true;
}
#endif
if (settings.drop_privileges) {
setup_privilege_violations_handler();
}
if (prefill)
slabs_prefill_global();
/* In restartable mode and we've decided to issue a fixup on memory */
if (memory_file != NULL && reuse_mem) {
mc_ptr_t old_base = meta->old_base;
assert(old_base == meta->old_base);
// should've pulled in process_started from meta file.
process_started = meta->process_started;
// TODO: must be a more canonical way of serializing/deserializing
// pointers? passing through uint64_t should work, and we're not
// annotating the pointer with anything, but it's still slightly
// insane.
restart_fixup((void *)old_base);
}
/*
* ignore SIGPIPE signals; we can use errno == EPIPE if we
* need that information
*/
if (sigignore(SIGPIPE) == -1) {
perror("failed to ignore SIGPIPE; sigaction");
exit(EX_OSERR);
}
/* start up worker threads if MT mode */
#ifdef EXTSTORE
slabs_set_storage(storage);
memcached_thread_init(settings.num_threads, storage);
init_lru_crawler(storage);
#else
memcached_thread_init(settings.num_threads, NULL);
init_lru_crawler(NULL);
#endif
if (start_assoc_maint && start_assoc_maintenance_thread() == -1) {
exit(EXIT_FAILURE);
}
if (start_lru_crawler && start_item_crawler_thread() != 0) {
fprintf(stderr, "Failed to enable LRU crawler thread\n");
exit(EXIT_FAILURE);
}
#ifdef EXTSTORE
if (storage && start_storage_compact_thread(storage) != 0) {
fprintf(stderr, "Failed to start storage compaction thread\n");
exit(EXIT_FAILURE);
}
if (storage && start_storage_write_thread(storage) != 0) {
fprintf(stderr, "Failed to start storage writer thread\n");
exit(EXIT_FAILURE);
}
if (start_lru_maintainer && start_lru_maintainer_thread(storage) != 0) {
#else
if (start_lru_maintainer && start_lru_maintainer_thread(NULL) != 0) {
#endif
fprintf(stderr, "Failed to enable LRU maintainer thread\n");
free(meta);
return 1;
}
if (settings.slab_reassign &&
start_slab_maintenance_thread() == -1) {
exit(EXIT_FAILURE);
}
if (settings.idle_timeout && start_conn_timeout_thread() == -1) {
exit(EXIT_FAILURE);
}
/* initialise clock event */
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
monotonic = true;
monotonic_start = ts.tv_sec;
// Monotonic clock needs special handling for restarts.
// We get a start time at an arbitrary place, so we need to
// restore the original time delta, which is always "now" - _start
if (reuse_mem) {
// the running timespan at stop time + the time we think we
// were stopped.
monotonic_start -= meta->current_time + meta->time_delta;
} else {
monotonic_start -= ITEM_UPDATE_INTERVAL + 2;
}
}
}
#endif
clock_handler(0, 0, 0);
/* create unix mode sockets after dropping privileges */
if (settings.socketpath != NULL) {
errno = 0;
if (server_socket_unix(settings.socketpath,settings.access)) {
vperror("failed to listen on UNIX socket: %s", settings.socketpath);
exit(EX_OSERR);
}
}
/* create the listening socket, bind it, and init */
if (settings.socketpath == NULL) {
const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME");
char *temp_portnumber_filename = NULL;
size_t len;
FILE *portnumber_file = NULL;
if (portnumber_filename != NULL) {
len = strlen(portnumber_filename)+4+1;
temp_portnumber_filename = malloc(len);
snprintf(temp_portnumber_filename,
len,
"%s.lck", portnumber_filename);
portnumber_file = fopen(temp_portnumber_filename, "a");
if (portnumber_file == NULL) {
fprintf(stderr, "Failed to open \"%s\": %s\n",
temp_portnumber_filename, strerror(errno));
}
}
errno = 0;
if (settings.port && server_sockets(settings.port, tcp_transport,
portnumber_file)) {
vperror("failed to listen on TCP port %d", settings.port);
exit(EX_OSERR);
}
/*
* initialization order: first create the listening sockets
* (may need root on low ports), then drop root if needed,
* then daemonize if needed, then init libevent (in some cases
* descriptors created by libevent wouldn't survive forking).
*/
/* create the UDP listening socket and bind it */
errno = 0;
if (settings.udpport && server_sockets(settings.udpport, udp_transport,
portnumber_file)) {
vperror("failed to listen on UDP port %d", settings.udpport);
exit(EX_OSERR);
}
if (portnumber_file) {
fclose(portnumber_file);
rename(temp_portnumber_filename, portnumber_filename);
}
if (temp_portnumber_filename)
free(temp_portnumber_filename);
}
/* Give the sockets a moment to open. I know this is dumb, but the error
* is only an advisory.
*/
usleep(1000);
if (stats_state.curr_conns + stats_state.reserved_fds >= settings.maxconns - 1) {
fprintf(stderr, "Maxconns setting is too low, use -c to increase.\n");
exit(EXIT_FAILURE);
}
if (pid_file != NULL) {
save_pid(pid_file);
}
/* Drop privileges no longer needed */
if (settings.drop_privileges) {
drop_privileges();
}
/* Initialize the uriencode lookup table. */
uriencode_init();
/* enter the event loop */
while (!stop_main_loop) {
if (event_base_loop(main_base, EVLOOP_ONCE) != 0) {
retval = EXIT_FAILURE;
break;
}
}
fprintf(stderr, "Gracefully stopping\n");
stop_threads();
int i;
// FIXME: make a function callable from threads.c
for (i = 0; i < max_fds; i++) {
if (conns[i] && conns[i]->state != conn_closed) {
conn_close(conns[i]);
}
}
if (memory_file != NULL) {
restart_mmap_close();
}
/* remove the PID file if we're a daemon */
if (do_daemonize)
remove_pidfile(pid_file);
/* Clean up strdup() call for bind() address */
if (settings.inter)
free(settings.inter);
/* cleanup base */
event_base_free(main_base);
free(meta);
return retval;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_3888_0 |
crossvul-cpp_data_good_3927_0 | /*!
* \file LoRaMac.c
*
* \brief LoRa MAC layer implementation
*
* \copyright Revised BSD License, see section \ref LICENSE.
*
* \code
* ______ _
* / _____) _ | |
* ( (____ _____ ____ _| |_ _____ ____| |__
* \____ \| ___ | (_ _) ___ |/ ___) _ \
* _____) ) ____| | | || |_| ____( (___| | | |
* (______/|_____)_|_|_| \__)_____)\____)_| |_|
* (C)2013-2017 Semtech
*
* ___ _____ _ ___ _ _____ ___ ___ ___ ___
* / __|_ _/_\ / __| |/ / __/ _ \| _ \/ __| __|
* \__ \ | |/ _ \ (__| ' <| _| (_) | / (__| _|
* |___/ |_/_/ \_\___|_|\_\_| \___/|_|_\\___|___|
* embedded.connectivity.solutions===============
*
* \endcode
*
* \author Miguel Luis ( Semtech )
*
* \author Gregory Cristian ( Semtech )
*
* \author Daniel Jaeckle ( STACKFORCE )
*
* \author Johannes Bruder ( STACKFORCE )
*/
#include "utilities.h"
#include "region/Region.h"
#include "LoRaMacClassB.h"
#include "LoRaMacCrypto.h"
#include "secure-element.h"
#include "LoRaMacTest.h"
#include "LoRaMacTypes.h"
#include "LoRaMacConfirmQueue.h"
#include "LoRaMacHeaderTypes.h"
#include "LoRaMacMessageTypes.h"
#include "LoRaMacParser.h"
#include "LoRaMacCommands.h"
#include "LoRaMacAdr.h"
#include "LoRaMac.h"
#ifndef LORAMAC_VERSION
/*!
* LORaWAN version definition.
*/
#define LORAMAC_VERSION 0x01000300
#endif
/*!
* Maximum PHY layer payload size
*/
#define LORAMAC_PHY_MAXPAYLOAD 255
/*!
* Maximum MAC commands buffer size
*/
#define LORA_MAC_COMMAND_MAX_LENGTH 128
/*!
* Maximum length of the fOpts field
*/
#define LORA_MAC_COMMAND_MAX_FOPTS_LENGTH 15
/*!
* LoRaMac duty cycle for the back-off procedure during the first hour.
*/
#define BACKOFF_DC_1_HOUR 100
/*!
* LoRaMac duty cycle for the back-off procedure during the next 10 hours.
*/
#define BACKOFF_DC_10_HOURS 1000
/*!
* LoRaMac duty cycle for the back-off procedure during the next 24 hours.
*/
#define BACKOFF_DC_24_HOURS 10000
/*!
* LoRaMac internal states
*/
enum eLoRaMacState
{
LORAMAC_IDLE = 0x00000000,
LORAMAC_STOPPED = 0x00000001,
LORAMAC_TX_RUNNING = 0x00000002,
LORAMAC_RX = 0x00000004,
LORAMAC_ACK_RETRY = 0x00000010,
LORAMAC_TX_DELAYED = 0x00000020,
LORAMAC_TX_CONFIG = 0x00000040,
LORAMAC_RX_ABORT = 0x00000080,
};
/*
* Request permission state
*/
typedef enum eLoRaMacRequestHandling
{
LORAMAC_REQUEST_HANDLING_OFF = 0,
LORAMAC_REQUEST_HANDLING_ON = !LORAMAC_REQUEST_HANDLING_OFF
}LoRaMacRequestHandling_t;
typedef struct sLoRaMacNvmCtx
{
/*
* LoRaMac region.
*/
LoRaMacRegion_t Region;
/*
* LoRaMac default parameters
*/
LoRaMacParams_t MacParamsDefaults;
/*
* Network ID ( 3 bytes )
*/
uint32_t NetID;
/*
* Mote Address
*/
uint32_t DevAddr;
/*!
* Multicast channel list
*/
MulticastCtx_t MulticastChannelList[LORAMAC_MAX_MC_CTX];
/*
* Actual device class
*/
DeviceClass_t DeviceClass;
/*
* Indicates if the node is connected to
* a private or public network
*/
bool PublicNetwork;
/*
* LoRaMac ADR control status
*/
bool AdrCtrlOn;
/*
* Counts the number of missed ADR acknowledgements
*/
uint32_t AdrAckCounter;
/*
* LoRaMac parameters
*/
LoRaMacParams_t MacParams;
/*
* Maximum duty cycle
* \remark Possibility to shutdown the device.
*/
uint8_t MaxDCycle;
/*
* Enables/Disables duty cycle management (Test only)
*/
bool DutyCycleOn;
/*
* Current channel index
*/
uint8_t LastTxChannel;
/*
* Buffer containing the MAC layer commands
*/
uint8_t MacCommandsBuffer[LORA_MAC_COMMAND_MAX_LENGTH];
/*
* If the server has sent a FRAME_TYPE_DATA_CONFIRMED_DOWN this variable indicates
* if the ACK bit must be set for the next transmission
*/
bool SrvAckRequested;
/*
* Aggregated duty cycle management
*/
uint16_t AggregatedDCycle;
/*
* Aggregated duty cycle management
*/
TimerTime_t LastTxDoneTime;
TimerTime_t AggregatedTimeOff;
/*
* Stores the time at LoRaMac initialization.
*
* \remark Used for the BACKOFF_DC computation.
*/
SysTime_t InitializationTime;
/*
* Current LoRaWAN Version
*/
Version_t Version;
/*
* End-Device network activation
*/
ActivationType_t NetworkActivation;
/*!
* Last received Message integrity Code (MIC)
*/
uint32_t LastRxMic;
}LoRaMacNvmCtx_t;
typedef struct sLoRaMacCtx
{
/*
* Length of packet in PktBuffer
*/
uint16_t PktBufferLen;
/*
* Buffer containing the data to be sent or received.
*/
uint8_t PktBuffer[LORAMAC_PHY_MAXPAYLOAD];
/*!
* Current processed transmit message
*/
LoRaMacMessage_t TxMsg;
/*!
* Buffer containing the data received by the application.
*/
uint8_t AppData[LORAMAC_PHY_MAXPAYLOAD];
/*
* Size of buffer containing the application data.
*/
uint8_t AppDataSize;
/*
* Buffer containing the upper layer data.
*/
uint8_t RxPayload[LORAMAC_PHY_MAXPAYLOAD];
SysTime_t LastTxSysTime;
/*
* LoRaMac internal state
*/
uint32_t MacState;
/*
* LoRaMac upper layer event functions
*/
LoRaMacPrimitives_t* MacPrimitives;
/*
* LoRaMac upper layer callback functions
*/
LoRaMacCallback_t* MacCallbacks;
/*
* Radio events function pointer
*/
RadioEvents_t RadioEvents;
/*
* LoRaMac duty cycle delayed Tx timer
*/
TimerEvent_t TxDelayedTimer;
/*
* LoRaMac reception windows timers
*/
TimerEvent_t RxWindowTimer1;
TimerEvent_t RxWindowTimer2;
/*
* LoRaMac reception windows delay
* \remark normal frame: RxWindowXDelay = ReceiveDelayX - RADIO_WAKEUP_TIME
* join frame : RxWindowXDelay = JoinAcceptDelayX - RADIO_WAKEUP_TIME
*/
uint32_t RxWindow1Delay;
uint32_t RxWindow2Delay;
/*
* LoRaMac Rx windows configuration
*/
RxConfigParams_t RxWindow1Config;
RxConfigParams_t RxWindow2Config;
RxConfigParams_t RxWindowCConfig;
/*
* Limit of uplinks without any donwlink response before the ADRACKReq bit will be set.
*/
uint16_t AdrAckLimit;
/*
* Limit of uplinks without any donwlink response after a the first frame with set ADRACKReq bit
* before the trying to regain the connectivity.
*/
uint16_t AdrAckDelay;
/*
* Acknowledge timeout timer. Used for packet retransmissions.
*/
TimerEvent_t AckTimeoutTimer;
/*
* Uplink messages repetitions counter
*/
uint8_t ChannelsNbTransCounter;
/*
* Number of trials to get a frame acknowledged
*/
uint8_t AckTimeoutRetries;
/*
* Number of trials to get a frame acknowledged
*/
uint8_t AckTimeoutRetriesCounter;
/*
* Indicates if the AckTimeout timer has expired or not
*/
bool AckTimeoutRetry;
/*
* If the node has sent a FRAME_TYPE_DATA_CONFIRMED_UP this variable indicates
* if the nodes needs to manage the server acknowledgement.
*/
bool NodeAckRequested;
/*
* Current channel index
*/
uint8_t Channel;
/*
* Last transmission time on air
*/
TimerTime_t TxTimeOnAir;
/*
* Structure to hold an MCPS indication data.
*/
McpsIndication_t McpsIndication;
/*
* Structure to hold MCPS confirm data.
*/
McpsConfirm_t McpsConfirm;
/*
* Structure to hold MLME confirm data.
*/
MlmeConfirm_t MlmeConfirm;
/*
* Structure to hold MLME indication data.
*/
MlmeIndication_t MlmeIndication;
/*
* Holds the current rx window slot
*/
LoRaMacRxSlot_t RxSlot;
/*
* LoRaMac tx/rx operation state
*/
LoRaMacFlags_t MacFlags;
/*
* Data structure indicating if a request is allowed or not.
*/
LoRaMacRequestHandling_t AllowRequests;
/*
* Non-volatile module context structure
*/
LoRaMacNvmCtx_t* NvmCtx;
}LoRaMacCtx_t;
/*
* Module context.
*/
static LoRaMacCtx_t MacCtx;
/*
* Non-volatile module context.
*/
static LoRaMacNvmCtx_t NvmMacCtx;
/*
* List of module contexts.
*/
LoRaMacCtxs_t Contexts;
/*!
* Defines the LoRaMac radio events status
*/
typedef union uLoRaMacRadioEvents
{
uint32_t Value;
struct sEvents
{
uint32_t RxTimeout : 1;
uint32_t RxError : 1;
uint32_t TxTimeout : 1;
uint32_t RxDone : 1;
uint32_t TxDone : 1;
}Events;
}LoRaMacRadioEvents_t;
/*!
* LoRaMac radio events status
*/
LoRaMacRadioEvents_t LoRaMacRadioEvents = { .Value = 0 };
/*!
* \brief Function to be executed on Radio Tx Done event
*/
static void OnRadioTxDone( void );
/*!
* \brief This function prepares the MAC to abort the execution of function
* OnRadioRxDone in case of a reception error.
*/
static void PrepareRxDoneAbort( void );
/*!
* \brief Function to be executed on Radio Rx Done event
*/
static void OnRadioRxDone( uint8_t* payload, uint16_t size, int16_t rssi, int8_t snr );
/*!
* \brief Function executed on Radio Tx Timeout event
*/
static void OnRadioTxTimeout( void );
/*!
* \brief Function executed on Radio Rx error event
*/
static void OnRadioRxError( void );
/*!
* \brief Function executed on Radio Rx Timeout event
*/
static void OnRadioRxTimeout( void );
/*!
* \brief Function executed on duty cycle delayed Tx timer event
*/
static void OnTxDelayedTimerEvent( void* context );
/*!
* \brief Function executed on first Rx window timer event
*/
static void OnRxWindow1TimerEvent( void* context );
/*!
* \brief Function executed on second Rx window timer event
*/
static void OnRxWindow2TimerEvent( void* context );
/*!
* \brief Function executed on AckTimeout timer event
*/
static void OnAckTimeoutTimerEvent( void* context );
/*!
* \brief Configures the events to trigger an MLME-Indication with
* a MLME type of MLME_SCHEDULE_UPLINK.
*/
static void SetMlmeScheduleUplinkIndication( void );
/*!
* Computes next 32 bit downlink counter value and determines the frame counter ID.
*
* \param[IN] addrID - Address identifier
* \param[IN] fType - Frame type
* \param[IN] macMsg - Data message object, holding the current 16 bit transmitted frame counter
* \param[IN] lrWanVersion - LoRaWAN version
* \param[IN] maxFCntGap - Maximum allowed frame counter difference (only for 1.0.X necessary)
* \param[OUT] fCntID - Frame counter identifier
* \param[OUT] currentDown - Current downlink counter value
*
* \retval - Status of the operation
*/
static LoRaMacCryptoStatus_t GetFCntDown( AddressIdentifier_t addrID, FType_t fType, LoRaMacMessageData_t* macMsg, Version_t lrWanVersion,
uint16_t maxFCntGap, FCntIdentifier_t* fCntID, uint32_t* currentDown );
/*!
* \brief Switches the device class
*
* \param [IN] deviceClass Device class to switch to
*/
static LoRaMacStatus_t SwitchClass( DeviceClass_t deviceClass );
/*!
* \brief Gets the maximum application payload length in the absence of the optional FOpt field.
*
* \param [IN] datarate Current datarate
*
* \retval Max length
*/
static uint8_t GetMaxAppPayloadWithoutFOptsLength( int8_t datarate );
/*!
* \brief Validates if the payload fits into the frame, taking the datarate
* into account.
*
* \details Refer to chapter 4.3.2 of the LoRaWAN specification, v1.0
*
* \param lenN Length of the application payload. The length depends on the
* datarate and is region specific
*
* \param datarate Current datarate
*
* \param fOptsLen Length of the fOpts field
*
* \retval [false: payload does not fit into the frame, true: payload fits into
* the frame]
*/
static bool ValidatePayloadLength( uint8_t lenN, int8_t datarate, uint8_t fOptsLen );
/*!
* \brief Decodes MAC commands in the fOpts field and in the payload
*
* \param [IN] payload A pointer to the payload
* \param [IN] macIndex The index of the payload where the MAC commands start
* \param [IN] commandsSize The size of the MAC commands
* \param [IN] snr The SNR value of the frame
* \param [IN] rxSlot The RX slot where the frame was received
*/
static void ProcessMacCommands( uint8_t* payload, uint8_t macIndex, uint8_t commandsSize, int8_t snr, LoRaMacRxSlot_t rxSlot );
/*!
* \brief LoRaMAC layer generic send frame
*
* \param [IN] macHdr MAC header field
* \param [IN] fPort MAC payload port
* \param [IN] fBuffer MAC data buffer to be sent
* \param [IN] fBufferSize MAC data buffer size
* \retval status Status of the operation.
*/
LoRaMacStatus_t Send( LoRaMacHeader_t* macHdr, uint8_t fPort, void* fBuffer, uint16_t fBufferSize );
/*!
* \brief LoRaMAC layer send join/rejoin request
*
* \param [IN] joinReqType Type of join-request or rejoin
*
* \retval status Status of the operation.
*/
LoRaMacStatus_t SendReJoinReq( JoinReqIdentifier_t joinReqType );
/*!
* \brief LoRaMAC layer frame buffer initialization
*
* \param [IN] macHdr MAC header field
* \param [IN] fCtrl MAC frame control field
* \param [IN] fOpts MAC commands buffer
* \param [IN] fPort MAC payload port
* \param [IN] fBuffer MAC data buffer to be sent
* \param [IN] fBufferSize MAC data buffer size
* \retval status Status of the operation.
*/
LoRaMacStatus_t PrepareFrame( LoRaMacHeader_t* macHdr, LoRaMacFrameCtrl_t* fCtrl, uint8_t fPort, void* fBuffer, uint16_t fBufferSize );
/*
* \brief Schedules the frame according to the duty cycle
*
* \param [IN] allowDelayedTx When set to true, the a frame will be delayed,
* the duty cycle restriction is active
* \retval Status of the operation
*/
static LoRaMacStatus_t ScheduleTx( bool allowDelayedTx );
/*
* \brief Secures the current processed frame ( TxMsg )
* \param[IN] txDr Data rate used for the transmission
* \param[IN] txCh Index of the channel used for the transmission
* \retval status Status of the operation
*/
static LoRaMacStatus_t SecureFrame( uint8_t txDr, uint8_t txCh );
/*
* \brief Calculates the back-off time for the band of a channel.
*
* \param [IN] channel The last Tx channel index
*/
static void CalculateBackOff( uint8_t channel );
/*
* \brief Function to remove pending MAC commands
*
* \param [IN] rxSlot The RX slot on which the frame was received
* \param [IN] fCtrl The frame control field of the received frame
* \param [IN] request The request type
*/
static void RemoveMacCommands( LoRaMacRxSlot_t rxSlot, LoRaMacFrameCtrl_t fCtrl, Mcps_t request );
/*!
* \brief LoRaMAC layer prepared frame buffer transmission with channel specification
*
* \remark PrepareFrame must be called at least once before calling this
* function.
*
* \param [IN] channel Channel to transmit on
* \retval status Status of the operation.
*/
LoRaMacStatus_t SendFrameOnChannel( uint8_t channel );
/*!
* \brief Sets the radio in continuous transmission mode
*
* \remark Uses the radio parameters set on the previous transmission.
*
* \param [IN] timeout Time in seconds while the radio is kept in continuous wave mode
* \retval status Status of the operation.
*/
LoRaMacStatus_t SetTxContinuousWave( uint16_t timeout );
/*!
* \brief Sets the radio in continuous transmission mode
*
* \remark Uses the radio parameters set on the previous transmission.
*
* \param [IN] timeout Time in seconds while the radio is kept in continuous wave mode
* \param [IN] frequency RF frequency to be set.
* \param [IN] power RF output power to be set.
* \retval status Status of the operation.
*/
LoRaMacStatus_t SetTxContinuousWave1( uint16_t timeout, uint32_t frequency, uint8_t power );
/*!
* \brief Resets MAC specific parameters to default
*/
static void ResetMacParameters( void );
/*!
* \brief Initializes and opens the reception window
*
* \param [IN] rxTimer Window timer to be topped.
* \param [IN] rxConfig Window parameters to be setup
*/
static void RxWindowSetup( TimerEvent_t* rxTimer, RxConfigParams_t* rxConfig );
/*!
* \brief Opens up a continuous RX C window. This is used for
* class c devices.
*/
static void OpenContinuousRxCWindow( void );
/*!
* \brief Returns a pointer to the internal contexts structure.
*
* \retval void Points to a structure containing all contexts
*/
LoRaMacCtxs_t* GetCtxs( void );
/*!
* \brief Restoring of internal module contexts
*
* \details This function allows to restore module contexts by a given pointer.
*
*
* \retval LoRaMacStatus_t Status of the operation. Possible returns are:
* returns are:
* \ref LORAMAC_STATUS_OK,
* \ref LORAMAC_STATUS_PARAMETER_INVALID,
*/
LoRaMacStatus_t RestoreCtxs( LoRaMacCtxs_t* contexts );
/*!
* \brief Determines the frame type
*
* \param [IN] macMsg Data message object
*
* \param [OUT] fType Frame type
*
* \retval LoRaMacStatus_t Status of the operation. Possible returns are:
* returns are:
* \ref LORAMAC_STATUS_OK,
* \ref LORAMAC_STATUS_PARAMETER_INVALID,
*/
LoRaMacStatus_t DetermineFrameType( LoRaMacMessageData_t* macMsg, FType_t* fType );
/*!
* \brief Checks if the retransmission should be stopped in case of a unconfirmed uplink
*
* \retval Returns true if it should be stopped.
*/
static bool CheckRetransUnconfirmedUplink( void );
/*!
* \brief Checks if the retransmission should be stopped in case of a confirmed uplink
*
* \retval Returns true it should be stopped.
*/
static bool CheckRetransConfirmedUplink( void );
/*!
* \brief Stops the uplink retransmission
*
* \retval Returns true if successful.
*/
static bool StopRetransmission( void );
/*!
* \brief Handles the ACK retries algorithm.
* Increments the re-tries counter up until the specified number of
* trials or the allowed maximum. Decrease the uplink datarate every 2
* trials.
*/
static void AckTimeoutRetriesProcess( void );
/*!
* \brief Finalizes the ACK retries algorithm.
* If no ACK is received restores the default channels
*/
static void AckTimeoutRetriesFinalize( void );
/*!
* \brief Calls the callback to indicate that a context changed
*/
static void CallNvmCtxCallback( LoRaMacNvmCtxModule_t module );
/*!
* \brief MAC NVM Context has been changed
*/
static void EventMacNvmCtxChanged( void );
/*!
* \brief Region NVM Context has been changed
*/
static void EventRegionNvmCtxChanged( void );
/*!
* \brief Crypto NVM Context has been changed
*/
static void EventCryptoNvmCtxChanged( void );
/*!
* \brief Secure Element NVM Context has been changed
*/
static void EventSecureElementNvmCtxChanged( void );
/*!
* \brief MAC commands module nvm context has been changed
*/
static void EventCommandsNvmCtxChanged( void );
/*!
* \brief Class B module nvm context has been changed
*/
static void EventClassBNvmCtxChanged( void );
/*!
* \brief Confirm Queue module nvm context has been changed
*/
static void EventConfirmQueueNvmCtxChanged( void );
/*!
* \brief Verifies if a request is pending currently
*
*\retval 1: Request pending, 0: request not pending
*/
static uint8_t IsRequestPending( void );
/*!
* \brief Enabled the possibility to perform requests
*
* \param [IN] requestState Request permission state
*/
static void LoRaMacEnableRequests( LoRaMacRequestHandling_t requestState );
/*!
* \brief This function verifies if a RX abort occurred
*/
static void LoRaMacCheckForRxAbort( void );
/*!
* \brief This function verifies if a beacon acquisition MLME
* request was pending
*
* \retval 1: Request pending, 0: no request pending
*/
static uint8_t LoRaMacCheckForBeaconAcquisition( void );
/*!
* \brief This function handles join request
*/
static void LoRaMacHandleMlmeRequest( void );
/*!
* \brief This function handles mcps request
*/
static void LoRaMacHandleMcpsRequest( void );
/*!
* \brief This function handles callback events for requests
*/
static void LoRaMacHandleRequestEvents( void );
/*!
* \brief This function handles callback events for indications
*/
static void LoRaMacHandleIndicationEvents( void );
/*!
* Structure used to store the radio Tx event data
*/
struct
{
TimerTime_t CurTime;
}TxDoneParams;
/*!
* Structure used to store the radio Rx event data
*/
struct
{
TimerTime_t LastRxDone;
uint8_t *Payload;
uint16_t Size;
int16_t Rssi;
int8_t Snr;
}RxDoneParams;
static void OnRadioTxDone( void )
{
TxDoneParams.CurTime = TimerGetCurrentTime( );
MacCtx.LastTxSysTime = SysTimeGet( );
LoRaMacRadioEvents.Events.TxDone = 1;
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->MacProcessNotify != NULL ) )
{
MacCtx.MacCallbacks->MacProcessNotify( );
}
}
static void OnRadioRxDone( uint8_t *payload, uint16_t size, int16_t rssi, int8_t snr )
{
RxDoneParams.LastRxDone = TimerGetCurrentTime( );
RxDoneParams.Payload = payload;
RxDoneParams.Size = size;
RxDoneParams.Rssi = rssi;
RxDoneParams.Snr = snr;
LoRaMacRadioEvents.Events.RxDone = 1;
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->MacProcessNotify != NULL ) )
{
MacCtx.MacCallbacks->MacProcessNotify( );
}
}
static void OnRadioTxTimeout( void )
{
LoRaMacRadioEvents.Events.TxTimeout = 1;
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->MacProcessNotify != NULL ) )
{
MacCtx.MacCallbacks->MacProcessNotify( );
}
}
static void OnRadioRxError( void )
{
LoRaMacRadioEvents.Events.RxError = 1;
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->MacProcessNotify != NULL ) )
{
MacCtx.MacCallbacks->MacProcessNotify( );
}
}
static void OnRadioRxTimeout( void )
{
LoRaMacRadioEvents.Events.RxTimeout = 1;
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->MacProcessNotify != NULL ) )
{
MacCtx.MacCallbacks->MacProcessNotify( );
}
}
static void UpdateRxSlotIdleState( void )
{
if( MacCtx.NvmCtx->DeviceClass != CLASS_C )
{
MacCtx.RxSlot = RX_SLOT_NONE;
}
else
{
MacCtx.RxSlot = RX_SLOT_WIN_CLASS_C;
}
}
static void ProcessRadioTxDone( void )
{
GetPhyParams_t getPhy;
PhyParam_t phyParam;
SetBandTxDoneParams_t txDone;
if( MacCtx.NvmCtx->DeviceClass != CLASS_C )
{
Radio.Sleep( );
}
// Setup timers
TimerSetValue( &MacCtx.RxWindowTimer1, MacCtx.RxWindow1Delay );
TimerStart( &MacCtx.RxWindowTimer1 );
TimerSetValue( &MacCtx.RxWindowTimer2, MacCtx.RxWindow2Delay );
TimerStart( &MacCtx.RxWindowTimer2 );
if( ( MacCtx.NvmCtx->DeviceClass == CLASS_C ) || ( MacCtx.NodeAckRequested == true ) )
{
getPhy.Attribute = PHY_ACK_TIMEOUT;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
TimerSetValue( &MacCtx.AckTimeoutTimer, MacCtx.RxWindow2Delay + phyParam.Value );
TimerStart( &MacCtx.AckTimeoutTimer );
}
// Store last Tx channel
MacCtx.NvmCtx->LastTxChannel = MacCtx.Channel;
// Update last tx done time for the current channel
txDone.Channel = MacCtx.Channel;
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
txDone.Joined = false;
}
else
{
txDone.Joined = true;
}
txDone.LastTxDoneTime = TxDoneParams.CurTime;
RegionSetBandTxDone( MacCtx.NvmCtx->Region, &txDone );
// Update Aggregated last tx done time
MacCtx.NvmCtx->LastTxDoneTime = TxDoneParams.CurTime;
if( MacCtx.NodeAckRequested == false )
{
MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_OK;
}
}
static void PrepareRxDoneAbort( void )
{
MacCtx.MacState |= LORAMAC_RX_ABORT;
if( MacCtx.NodeAckRequested == true )
{
OnAckTimeoutTimerEvent( NULL );
}
MacCtx.MacFlags.Bits.McpsInd = 1;
MacCtx.MacFlags.Bits.MacDone = 1;
UpdateRxSlotIdleState( );
}
static void ProcessRadioRxDone( void )
{
LoRaMacHeader_t macHdr;
ApplyCFListParams_t applyCFList;
GetPhyParams_t getPhy;
PhyParam_t phyParam;
LoRaMacCryptoStatus_t macCryptoStatus = LORAMAC_CRYPTO_ERROR;
LoRaMacMessageData_t macMsgData;
LoRaMacMessageJoinAccept_t macMsgJoinAccept;
uint8_t *payload = RxDoneParams.Payload;
uint16_t size = RxDoneParams.Size;
int16_t rssi = RxDoneParams.Rssi;
int8_t snr = RxDoneParams.Snr;
uint8_t pktHeaderLen = 0;
uint32_t downLinkCounter = 0;
uint32_t address = MacCtx.NvmCtx->DevAddr;
uint8_t multicast = 0;
AddressIdentifier_t addrID = UNICAST_DEV_ADDR;
FCntIdentifier_t fCntID;
MacCtx.McpsConfirm.AckReceived = false;
MacCtx.McpsIndication.Rssi = rssi;
MacCtx.McpsIndication.Snr = snr;
MacCtx.McpsIndication.RxSlot = MacCtx.RxSlot;
MacCtx.McpsIndication.Port = 0;
MacCtx.McpsIndication.Multicast = 0;
MacCtx.McpsIndication.FramePending = 0;
MacCtx.McpsIndication.Buffer = NULL;
MacCtx.McpsIndication.BufferSize = 0;
MacCtx.McpsIndication.RxData = false;
MacCtx.McpsIndication.AckReceived = false;
MacCtx.McpsIndication.DownLinkCounter = 0;
MacCtx.McpsIndication.McpsIndication = MCPS_UNCONFIRMED;
MacCtx.McpsIndication.DevAddress = 0;
MacCtx.McpsIndication.DeviceTimeAnsReceived = false;
Radio.Sleep( );
TimerStop( &MacCtx.RxWindowTimer2 );
// This function must be called even if we are not in class b mode yet.
if( LoRaMacClassBRxBeacon( payload, size ) == true )
{
MacCtx.MlmeIndication.BeaconInfo.Rssi = rssi;
MacCtx.MlmeIndication.BeaconInfo.Snr = snr;
return;
}
// Check if we expect a ping or a multicast slot.
if( MacCtx.NvmCtx->DeviceClass == CLASS_B )
{
if( LoRaMacClassBIsPingExpected( ) == true )
{
LoRaMacClassBSetPingSlotState( PINGSLOT_STATE_CALC_PING_OFFSET );
LoRaMacClassBPingSlotTimerEvent( NULL );
MacCtx.McpsIndication.RxSlot = RX_SLOT_WIN_CLASS_B_PING_SLOT;
}
else if( LoRaMacClassBIsMulticastExpected( ) == true )
{
LoRaMacClassBSetMulticastSlotState( PINGSLOT_STATE_CALC_PING_OFFSET );
LoRaMacClassBMulticastSlotTimerEvent( NULL );
MacCtx.McpsIndication.RxSlot = RX_SLOT_WIN_CLASS_B_MULTICAST_SLOT;
}
}
macHdr.Value = payload[pktHeaderLen++];
switch( macHdr.Bits.MType )
{
case FRAME_TYPE_JOIN_ACCEPT:
// Check if the received frame size is valid
if( size < LORAMAC_JOIN_ACCEPT_FRAME_MIN_SIZE )
{
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
return;
}
macMsgJoinAccept.Buffer = payload;
macMsgJoinAccept.BufSize = size;
// Abort in case if the device isn't joined yet and no rejoin request is ongoing.
if( MacCtx.NvmCtx->NetworkActivation != ACTIVATION_TYPE_NONE )
{
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
return;
}
macCryptoStatus = LoRaMacCryptoHandleJoinAccept( JOIN_REQ, SecureElementGetJoinEui( ), &macMsgJoinAccept );
if( LORAMAC_CRYPTO_SUCCESS == macCryptoStatus )
{
// Network ID
MacCtx.NvmCtx->NetID = ( uint32_t ) macMsgJoinAccept.NetID[0];
MacCtx.NvmCtx->NetID |= ( ( uint32_t ) macMsgJoinAccept.NetID[1] << 8 );
MacCtx.NvmCtx->NetID |= ( ( uint32_t ) macMsgJoinAccept.NetID[2] << 16 );
// Device Address
MacCtx.NvmCtx->DevAddr = macMsgJoinAccept.DevAddr;
// DLSettings
MacCtx.NvmCtx->MacParams.Rx1DrOffset = macMsgJoinAccept.DLSettings.Bits.RX1DRoffset;
MacCtx.NvmCtx->MacParams.Rx2Channel.Datarate = macMsgJoinAccept.DLSettings.Bits.RX2DataRate;
MacCtx.NvmCtx->MacParams.RxCChannel.Datarate = macMsgJoinAccept.DLSettings.Bits.RX2DataRate;
// RxDelay
MacCtx.NvmCtx->MacParams.ReceiveDelay1 = macMsgJoinAccept.RxDelay;
if( MacCtx.NvmCtx->MacParams.ReceiveDelay1 == 0 )
{
MacCtx.NvmCtx->MacParams.ReceiveDelay1 = 1;
}
MacCtx.NvmCtx->MacParams.ReceiveDelay1 *= 1000;
MacCtx.NvmCtx->MacParams.ReceiveDelay2 = MacCtx.NvmCtx->MacParams.ReceiveDelay1 + 1000;
MacCtx.NvmCtx->Version.Fields.Minor = 0;
// Apply CF list
applyCFList.Payload = macMsgJoinAccept.CFList;
// Size of the regular payload is 12. Plus 1 byte MHDR and 4 bytes MIC
applyCFList.Size = size - 17;
RegionApplyCFList( MacCtx.NvmCtx->Region, &applyCFList );
MacCtx.NvmCtx->NetworkActivation = ACTIVATION_TYPE_OTAA;
// MLME handling
if( LoRaMacConfirmQueueIsCmdActive( MLME_JOIN ) == true )
{
LoRaMacConfirmQueueSetStatus( LORAMAC_EVENT_INFO_STATUS_OK, MLME_JOIN );
}
}
else
{
// MLME handling
if( LoRaMacConfirmQueueIsCmdActive( MLME_JOIN ) == true )
{
LoRaMacConfirmQueueSetStatus( LORAMAC_EVENT_INFO_STATUS_JOIN_FAIL, MLME_JOIN );
}
}
break;
case FRAME_TYPE_DATA_CONFIRMED_DOWN:
MacCtx.McpsIndication.McpsIndication = MCPS_CONFIRMED;
// Intentional fall through
case FRAME_TYPE_DATA_UNCONFIRMED_DOWN:
// Check if the received payload size is valid
getPhy.UplinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
getPhy.Datarate = MacCtx.McpsIndication.RxDatarate;
getPhy.Attribute = PHY_MAX_PAYLOAD;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
if( ( MAX( 0, ( int16_t )( ( int16_t ) size - ( int16_t ) LORAMAC_FRAME_PAYLOAD_OVERHEAD_SIZE ) ) > ( int16_t )phyParam.Value ) ||
( size < LORAMAC_FRAME_PAYLOAD_MIN_SIZE ) )
{
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
return;
}
macMsgData.Buffer = payload;
macMsgData.BufSize = size;
macMsgData.FRMPayload = MacCtx.RxPayload;
macMsgData.FRMPayloadSize = LORAMAC_PHY_MAXPAYLOAD;
if( LORAMAC_PARSER_SUCCESS != LoRaMacParserData( &macMsgData ) )
{
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
return;
}
// Store device address
MacCtx.McpsIndication.DevAddress = macMsgData.FHDR.DevAddr;
FType_t fType;
if( LORAMAC_STATUS_OK != DetermineFrameType( &macMsgData, &fType ) )
{
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
return;
}
//Check if it is a multicast message
multicast = 0;
downLinkCounter = 0;
for( uint8_t i = 0; i < LORAMAC_MAX_MC_CTX; i++ )
{
if( ( MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.Address == macMsgData.FHDR.DevAddr ) &&
( MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.IsEnabled == true ) )
{
multicast = 1;
addrID = MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.GroupID;
downLinkCounter = *( MacCtx.NvmCtx->MulticastChannelList[i].DownLinkCounter );
address = MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.Address;
if( MacCtx.NvmCtx->DeviceClass == CLASS_C )
{
MacCtx.McpsIndication.RxSlot = RX_SLOT_WIN_CLASS_C_MULTICAST;
}
break;
}
}
// Filter messages according to multicast downlink exceptions
if( ( multicast == 1 ) && ( ( fType != FRAME_TYPE_D ) ||
( macMsgData.FHDR.FCtrl.Bits.Ack == true ) ||
( macMsgData.FHDR.FCtrl.Bits.AdrAckReq == true ) ) )
{
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
return;
}
// Get maximum allowed counter difference
getPhy.Attribute = PHY_MAX_FCNT_GAP;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
// Get downlink frame counter value
macCryptoStatus = GetFCntDown( addrID, fType, &macMsgData, MacCtx.NvmCtx->Version, phyParam.Value, &fCntID, &downLinkCounter );
if( macCryptoStatus != LORAMAC_CRYPTO_SUCCESS )
{
if( macCryptoStatus == LORAMAC_CRYPTO_FAIL_FCNT_DUPLICATED )
{
// Catch the case of repeated downlink frame counter
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_DOWNLINK_REPEATED;
if( ( MacCtx.NvmCtx->Version.Fields.Minor == 0 ) && ( macHdr.Bits.MType == FRAME_TYPE_DATA_CONFIRMED_DOWN ) && ( MacCtx.NvmCtx->LastRxMic == macMsgData.MIC ) )
{
MacCtx.NvmCtx->SrvAckRequested = true;
}
}
else if( macCryptoStatus == LORAMAC_CRYPTO_FAIL_MAX_GAP_FCNT )
{
// Lost too many frames
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_DOWNLINK_TOO_MANY_FRAMES_LOSS;
}
else
{
// Other errors
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
}
MacCtx.McpsIndication.DownLinkCounter = downLinkCounter;
PrepareRxDoneAbort( );
return;
}
macCryptoStatus = LoRaMacCryptoUnsecureMessage( addrID, address, fCntID, downLinkCounter, &macMsgData );
if( macCryptoStatus != LORAMAC_CRYPTO_SUCCESS )
{
if( macCryptoStatus == LORAMAC_CRYPTO_FAIL_ADDRESS )
{
// We are not the destination of this frame.
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ADDRESS_FAIL;
}
else
{
// MIC calculation fail
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_MIC_FAIL;
}
PrepareRxDoneAbort( );
return;
}
// Frame is valid
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_OK;
MacCtx.McpsIndication.Multicast = multicast;
MacCtx.McpsIndication.FramePending = macMsgData.FHDR.FCtrl.Bits.FPending;
MacCtx.McpsIndication.Buffer = NULL;
MacCtx.McpsIndication.BufferSize = 0;
MacCtx.McpsIndication.DownLinkCounter = downLinkCounter;
MacCtx.McpsIndication.AckReceived = macMsgData.FHDR.FCtrl.Bits.Ack;
MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_OK;
MacCtx.McpsConfirm.AckReceived = macMsgData.FHDR.FCtrl.Bits.Ack;
// Reset ADR ACK Counter only, when RX1 or RX2 slot
if( ( MacCtx.McpsIndication.RxSlot == RX_SLOT_WIN_1 ) ||
( MacCtx.McpsIndication.RxSlot == RX_SLOT_WIN_2 ) )
{
MacCtx.NvmCtx->AdrAckCounter = 0;
}
// MCPS Indication and ack requested handling
if( multicast == 1 )
{
MacCtx.McpsIndication.McpsIndication = MCPS_MULTICAST;
}
else
{
if( macHdr.Bits.MType == FRAME_TYPE_DATA_CONFIRMED_DOWN )
{
MacCtx.NvmCtx->SrvAckRequested = true;
if( MacCtx.NvmCtx->Version.Fields.Minor == 0 )
{
MacCtx.NvmCtx->LastRxMic = macMsgData.MIC;
}
MacCtx.McpsIndication.McpsIndication = MCPS_CONFIRMED;
}
else
{
MacCtx.NvmCtx->SrvAckRequested = false;
MacCtx.McpsIndication.McpsIndication = MCPS_UNCONFIRMED;
}
}
RemoveMacCommands( MacCtx.McpsIndication.RxSlot, macMsgData.FHDR.FCtrl, MacCtx.McpsConfirm.McpsRequest );
switch( fType )
{
case FRAME_TYPE_A:
{ /* +----------+------+-------+--------------+
* | FOptsLen | Fopt | FPort | FRMPayload |
* +----------+------+-------+--------------+
* | > 0 | X | > 0 | X |
* +----------+------+-------+--------------+
*/
// Decode MAC commands in FOpts field
ProcessMacCommands( macMsgData.FHDR.FOpts, 0, macMsgData.FHDR.FCtrl.Bits.FOptsLen, snr, MacCtx.McpsIndication.RxSlot );
MacCtx.McpsIndication.Port = macMsgData.FPort;
MacCtx.McpsIndication.Buffer = macMsgData.FRMPayload;
MacCtx.McpsIndication.BufferSize = macMsgData.FRMPayloadSize;
MacCtx.McpsIndication.RxData = true;
break;
}
case FRAME_TYPE_B:
{ /* +----------+------+-------+--------------+
* | FOptsLen | Fopt | FPort | FRMPayload |
* +----------+------+-------+--------------+
* | > 0 | X | - | - |
* +----------+------+-------+--------------+
*/
// Decode MAC commands in FOpts field
ProcessMacCommands( macMsgData.FHDR.FOpts, 0, macMsgData.FHDR.FCtrl.Bits.FOptsLen, snr, MacCtx.McpsIndication.RxSlot );
MacCtx.McpsIndication.Port = macMsgData.FPort;
break;
}
case FRAME_TYPE_C:
{ /* +----------+------+-------+--------------+
* | FOptsLen | Fopt | FPort | FRMPayload |
* +----------+------+-------+--------------+
* | = 0 | - | = 0 | MAC commands |
* +----------+------+-------+--------------+
*/
// Decode MAC commands in FRMPayload
ProcessMacCommands( macMsgData.FRMPayload, 0, macMsgData.FRMPayloadSize, snr, MacCtx.McpsIndication.RxSlot );
MacCtx.McpsIndication.Port = macMsgData.FPort;
break;
}
case FRAME_TYPE_D:
{ /* +----------+------+-------+--------------+
* | FOptsLen | Fopt | FPort | FRMPayload |
* +----------+------+-------+--------------+
* | = 0 | - | > 0 | X |
* +----------+------+-------+--------------+
*/
// No MAC commands just application payload
MacCtx.McpsIndication.Port = macMsgData.FPort;
MacCtx.McpsIndication.Buffer = macMsgData.FRMPayload;
MacCtx.McpsIndication.BufferSize = macMsgData.FRMPayloadSize;
MacCtx.McpsIndication.RxData = true;
break;
}
default:
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
break;
}
// Provide always an indication, skip the callback to the user application,
// in case of a confirmed downlink retransmission.
MacCtx.MacFlags.Bits.McpsInd = 1;
break;
case FRAME_TYPE_PROPRIETARY:
memcpy1( MacCtx.RxPayload, &payload[pktHeaderLen], size - pktHeaderLen );
MacCtx.McpsIndication.McpsIndication = MCPS_PROPRIETARY;
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_OK;
MacCtx.McpsIndication.Buffer = MacCtx.RxPayload;
MacCtx.McpsIndication.BufferSize = size - pktHeaderLen;
MacCtx.MacFlags.Bits.McpsInd = 1;
break;
default:
MacCtx.McpsIndication.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
PrepareRxDoneAbort( );
break;
}
// Verify if we need to disable the AckTimeoutTimer
if( MacCtx.NodeAckRequested == true )
{
if( MacCtx.McpsConfirm.AckReceived == true )
{
OnAckTimeoutTimerEvent( NULL );
}
}
else
{
if( MacCtx.NvmCtx->DeviceClass == CLASS_C )
{
OnAckTimeoutTimerEvent( NULL );
}
}
MacCtx.MacFlags.Bits.MacDone = 1;
UpdateRxSlotIdleState( );
}
static void ProcessRadioTxTimeout( void )
{
if( MacCtx.NvmCtx->DeviceClass != CLASS_C )
{
Radio.Sleep( );
}
UpdateRxSlotIdleState( );
MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_TX_TIMEOUT;
LoRaMacConfirmQueueSetStatusCmn( LORAMAC_EVENT_INFO_STATUS_TX_TIMEOUT );
if( MacCtx.NodeAckRequested == true )
{
MacCtx.AckTimeoutRetry = true;
}
MacCtx.MacFlags.Bits.MacDone = 1;
}
static void HandleRadioRxErrorTimeout( LoRaMacEventInfoStatus_t rx1EventInfoStatus, LoRaMacEventInfoStatus_t rx2EventInfoStatus )
{
bool classBRx = false;
if( MacCtx.NvmCtx->DeviceClass != CLASS_C )
{
Radio.Sleep( );
}
if( LoRaMacClassBIsBeaconExpected( ) == true )
{
LoRaMacClassBSetBeaconState( BEACON_STATE_TIMEOUT );
LoRaMacClassBBeaconTimerEvent( NULL );
classBRx = true;
}
if( MacCtx.NvmCtx->DeviceClass == CLASS_B )
{
if( LoRaMacClassBIsPingExpected( ) == true )
{
LoRaMacClassBSetPingSlotState( PINGSLOT_STATE_CALC_PING_OFFSET );
LoRaMacClassBPingSlotTimerEvent( NULL );
classBRx = true;
}
if( LoRaMacClassBIsMulticastExpected( ) == true )
{
LoRaMacClassBSetMulticastSlotState( PINGSLOT_STATE_CALC_PING_OFFSET );
LoRaMacClassBMulticastSlotTimerEvent( NULL );
classBRx = true;
}
}
if( classBRx == false )
{
if( MacCtx.RxSlot == RX_SLOT_WIN_1 )
{
if( MacCtx.NodeAckRequested == true )
{
MacCtx.McpsConfirm.Status = rx1EventInfoStatus;
}
LoRaMacConfirmQueueSetStatusCmn( rx1EventInfoStatus );
if( TimerGetElapsedTime( MacCtx.NvmCtx->LastTxDoneTime ) >= MacCtx.RxWindow2Delay )
{
TimerStop( &MacCtx.RxWindowTimer2 );
MacCtx.MacFlags.Bits.MacDone = 1;
}
}
else
{
if( MacCtx.NodeAckRequested == true )
{
MacCtx.McpsConfirm.Status = rx2EventInfoStatus;
}
LoRaMacConfirmQueueSetStatusCmn( rx2EventInfoStatus );
if( MacCtx.NvmCtx->DeviceClass != CLASS_C )
{
MacCtx.MacFlags.Bits.MacDone = 1;
}
}
}
UpdateRxSlotIdleState( );
}
static void ProcessRadioRxError( void )
{
HandleRadioRxErrorTimeout( LORAMAC_EVENT_INFO_STATUS_RX1_ERROR, LORAMAC_EVENT_INFO_STATUS_RX2_ERROR );
}
static void ProcessRadioRxTimeout( void )
{
HandleRadioRxErrorTimeout( LORAMAC_EVENT_INFO_STATUS_RX1_TIMEOUT, LORAMAC_EVENT_INFO_STATUS_RX2_TIMEOUT );
}
static void LoRaMacHandleIrqEvents( void )
{
LoRaMacRadioEvents_t events;
CRITICAL_SECTION_BEGIN( );
events = LoRaMacRadioEvents;
LoRaMacRadioEvents.Value = 0;
CRITICAL_SECTION_END( );
if( events.Value != 0 )
{
if( events.Events.TxDone == 1 )
{
ProcessRadioTxDone( );
}
if( events.Events.RxDone == 1 )
{
ProcessRadioRxDone( );
}
if( events.Events.TxTimeout == 1 )
{
ProcessRadioTxTimeout( );
}
if( events.Events.RxError == 1 )
{
ProcessRadioRxError( );
}
if( events.Events.RxTimeout == 1 )
{
ProcessRadioRxTimeout( );
}
}
}
bool LoRaMacIsBusy( void )
{
if( ( MacCtx.MacState == LORAMAC_IDLE ) &&
( MacCtx.AllowRequests == LORAMAC_REQUEST_HANDLING_ON ) )
{
return false;
}
return true;
}
static void LoRaMacEnableRequests( LoRaMacRequestHandling_t requestState )
{
MacCtx.AllowRequests = requestState;
}
static void LoRaMacHandleRequestEvents( void )
{
// Handle events
LoRaMacFlags_t reqEvents = MacCtx.MacFlags;
if( MacCtx.MacState == LORAMAC_IDLE )
{
// Update event bits
if( MacCtx.MacFlags.Bits.McpsReq == 1 )
{
MacCtx.MacFlags.Bits.McpsReq = 0;
}
if( MacCtx.MacFlags.Bits.MlmeReq == 1 )
{
MacCtx.MacFlags.Bits.MlmeReq = 0;
}
// Allow requests again
LoRaMacEnableRequests( LORAMAC_REQUEST_HANDLING_ON );
// Handle callbacks
if( reqEvents.Bits.McpsReq == 1 )
{
MacCtx.MacPrimitives->MacMcpsConfirm( &MacCtx.McpsConfirm );
}
if( reqEvents.Bits.MlmeReq == 1 )
{
LoRaMacConfirmQueueHandleCb( &MacCtx.MlmeConfirm );
if( LoRaMacConfirmQueueGetCnt( ) > 0 )
{
MacCtx.MacFlags.Bits.MlmeReq = 1;
}
}
// Start beaconing again
LoRaMacClassBResumeBeaconing( );
// Procedure done. Reset variables.
MacCtx.MacFlags.Bits.MacDone = 0;
}
}
static void LoRaMacHandleScheduleUplinkEvent( void )
{
// Handle events
if( MacCtx.MacState == LORAMAC_IDLE )
{
// Verify if sticky MAC commands are pending or not
bool isStickyMacCommandPending = false;
LoRaMacCommandsStickyCmdsPending( &isStickyMacCommandPending );
if( isStickyMacCommandPending == true )
{// Setup MLME indication
SetMlmeScheduleUplinkIndication( );
}
}
}
static void LoRaMacHandleIndicationEvents( void )
{
// Handle MLME indication
if( MacCtx.MacFlags.Bits.MlmeInd == 1 )
{
MacCtx.MacFlags.Bits.MlmeInd = 0;
MacCtx.MacPrimitives->MacMlmeIndication( &MacCtx.MlmeIndication );
}
if( MacCtx.MacFlags.Bits.MlmeSchedUplinkInd == 1 )
{
MlmeIndication_t schduleUplinkIndication;
schduleUplinkIndication.MlmeIndication = MLME_SCHEDULE_UPLINK;
schduleUplinkIndication.Status = LORAMAC_EVENT_INFO_STATUS_OK;
MacCtx.MacPrimitives->MacMlmeIndication( &schduleUplinkIndication );
MacCtx.MacFlags.Bits.MlmeSchedUplinkInd = 0;
}
// Handle MCPS indication
if( MacCtx.MacFlags.Bits.McpsInd == 1 )
{
MacCtx.MacFlags.Bits.McpsInd = 0;
MacCtx.MacPrimitives->MacMcpsIndication( &MacCtx.McpsIndication );
}
}
static void LoRaMacHandleMcpsRequest( void )
{
// Handle MCPS uplinks
if( MacCtx.MacFlags.Bits.McpsReq == 1 )
{
bool stopRetransmission = false;
bool waitForRetransmission = false;
if( ( MacCtx.McpsConfirm.McpsRequest == MCPS_UNCONFIRMED ) ||
( MacCtx.McpsConfirm.McpsRequest == MCPS_PROPRIETARY ) )
{
stopRetransmission = CheckRetransUnconfirmedUplink( );
}
else if( MacCtx.McpsConfirm.McpsRequest == MCPS_CONFIRMED )
{
if( MacCtx.AckTimeoutRetry == true )
{
stopRetransmission = CheckRetransConfirmedUplink( );
if( MacCtx.NvmCtx->Version.Fields.Minor == 0 )
{
if( stopRetransmission == false )
{
AckTimeoutRetriesProcess( );
}
else
{
AckTimeoutRetriesFinalize( );
}
}
}
else
{
waitForRetransmission = true;
}
}
if( stopRetransmission == true )
{// Stop retransmission
TimerStop( &MacCtx.TxDelayedTimer );
MacCtx.MacState &= ~LORAMAC_TX_DELAYED;
StopRetransmission( );
}
else if( waitForRetransmission == false )
{// Arrange further retransmission
MacCtx.MacFlags.Bits.MacDone = 0;
// Reset the state of the AckTimeout
MacCtx.AckTimeoutRetry = false;
// Sends the same frame again
OnTxDelayedTimerEvent( NULL );
}
}
}
static void LoRaMacHandleMlmeRequest( void )
{
// Handle join request
if( MacCtx.MacFlags.Bits.MlmeReq == 1 )
{
if( ( LoRaMacConfirmQueueIsCmdActive( MLME_JOIN ) == true ) )
{
if( LoRaMacConfirmQueueGetStatus( MLME_JOIN ) == LORAMAC_EVENT_INFO_STATUS_OK )
{// Node joined successfully
MacCtx.ChannelsNbTransCounter = 0;
}
MacCtx.MacState &= ~LORAMAC_TX_RUNNING;
}
else if( ( LoRaMacConfirmQueueIsCmdActive( MLME_TXCW ) == true ) ||
( LoRaMacConfirmQueueIsCmdActive( MLME_TXCW_1 ) == true ) )
{
MacCtx.MacState &= ~LORAMAC_TX_RUNNING;
}
}
}
static uint8_t LoRaMacCheckForBeaconAcquisition( void )
{
if( ( LoRaMacConfirmQueueIsCmdActive( MLME_BEACON_ACQUISITION ) == true ) &&
( MacCtx.MacFlags.Bits.McpsReq == 0 ) )
{
if( MacCtx.MacFlags.Bits.MlmeReq == 1 )
{
MacCtx.MacState &= ~LORAMAC_TX_RUNNING;
return 0x01;
}
}
return 0x00;
}
static void LoRaMacCheckForRxAbort( void )
{
// A error occurs during receiving
if( ( MacCtx.MacState & LORAMAC_RX_ABORT ) == LORAMAC_RX_ABORT )
{
MacCtx.MacState &= ~LORAMAC_RX_ABORT;
MacCtx.MacState &= ~LORAMAC_TX_RUNNING;
}
}
void LoRaMacProcess( void )
{
uint8_t noTx = false;
LoRaMacHandleIrqEvents( );
LoRaMacClassBProcess( );
// MAC proceeded a state and is ready to check
if( MacCtx.MacFlags.Bits.MacDone == 1 )
{
LoRaMacEnableRequests( LORAMAC_REQUEST_HANDLING_OFF );
LoRaMacCheckForRxAbort( );
// An error occurs during transmitting
if( IsRequestPending( ) > 0 )
{
noTx |= LoRaMacCheckForBeaconAcquisition( );
}
if( noTx == 0x00 )
{
LoRaMacHandleMlmeRequest( );
LoRaMacHandleMcpsRequest( );
}
LoRaMacHandleRequestEvents( );
LoRaMacHandleScheduleUplinkEvent( );
LoRaMacEnableRequests( LORAMAC_REQUEST_HANDLING_ON );
}
LoRaMacHandleIndicationEvents( );
if( MacCtx.RxSlot == RX_SLOT_WIN_CLASS_C )
{
OpenContinuousRxCWindow( );
}
}
static void OnTxDelayedTimerEvent( void* context )
{
TimerStop( &MacCtx.TxDelayedTimer );
MacCtx.MacState &= ~LORAMAC_TX_DELAYED;
// Schedule frame, allow delayed frame transmissions
switch( ScheduleTx( true ) )
{
case LORAMAC_STATUS_OK:
case LORAMAC_STATUS_DUTYCYCLE_RESTRICTED:
{
break;
}
default:
{
// Stop retransmission attempt
MacCtx.McpsConfirm.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
MacCtx.McpsConfirm.NbRetries = MacCtx.AckTimeoutRetriesCounter;
MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_TX_DR_PAYLOAD_SIZE_ERROR;
LoRaMacConfirmQueueSetStatusCmn( LORAMAC_EVENT_INFO_STATUS_TX_DR_PAYLOAD_SIZE_ERROR );
StopRetransmission( );
break;
}
}
}
static void OnRxWindow1TimerEvent( void* context )
{
MacCtx.RxWindow1Config.Channel = MacCtx.Channel;
MacCtx.RxWindow1Config.DrOffset = MacCtx.NvmCtx->MacParams.Rx1DrOffset;
MacCtx.RxWindow1Config.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
MacCtx.RxWindow1Config.RxContinuous = false;
MacCtx.RxWindow1Config.RxSlot = RX_SLOT_WIN_1;
RxWindowSetup( &MacCtx.RxWindowTimer1, &MacCtx.RxWindow1Config );
}
static void OnRxWindow2TimerEvent( void* context )
{
// Check if we are processing Rx1 window.
// If yes, we don't setup the Rx2 window.
if( MacCtx.RxSlot == RX_SLOT_WIN_1 )
{
return;
}
MacCtx.RxWindow2Config.Channel = MacCtx.Channel;
MacCtx.RxWindow2Config.Frequency = MacCtx.NvmCtx->MacParams.Rx2Channel.Frequency;
MacCtx.RxWindow2Config.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
MacCtx.RxWindow2Config.RxContinuous = false;
MacCtx.RxWindow2Config.RxSlot = RX_SLOT_WIN_2;
RxWindowSetup( &MacCtx.RxWindowTimer2, &MacCtx.RxWindow2Config );
}
static void OnAckTimeoutTimerEvent( void* context )
{
TimerStop( &MacCtx.AckTimeoutTimer );
if( MacCtx.NodeAckRequested == true )
{
MacCtx.AckTimeoutRetry = true;
}
if( MacCtx.NvmCtx->DeviceClass == CLASS_C )
{
MacCtx.MacFlags.Bits.MacDone = 1;
}
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->MacProcessNotify != NULL ) )
{
MacCtx.MacCallbacks->MacProcessNotify( );
}
}
static LoRaMacCryptoStatus_t GetFCntDown( AddressIdentifier_t addrID, FType_t fType, LoRaMacMessageData_t* macMsg, Version_t lrWanVersion,
uint16_t maxFCntGap, FCntIdentifier_t* fCntID, uint32_t* currentDown )
{
if( ( macMsg == NULL ) || ( fCntID == NULL ) ||
( currentDown == NULL ) )
{
return LORAMAC_CRYPTO_ERROR_NPE;
}
// Determine the frame counter identifier and choose counter from FCntList
switch( addrID )
{
case UNICAST_DEV_ADDR:
if( lrWanVersion.Fields.Minor == 1 )
{
if( ( fType == FRAME_TYPE_A ) || ( fType == FRAME_TYPE_D ) )
{
*fCntID = A_FCNT_DOWN;
}
else
{
*fCntID = N_FCNT_DOWN;
}
}
else
{ // For LoRaWAN 1.0.X
*fCntID = FCNT_DOWN;
}
break;
case MULTICAST_0_ADDR:
*fCntID = MC_FCNT_DOWN_0;
break;
case MULTICAST_1_ADDR:
*fCntID = MC_FCNT_DOWN_1;
break;
case MULTICAST_2_ADDR:
*fCntID = MC_FCNT_DOWN_2;
break;
case MULTICAST_3_ADDR:
*fCntID = MC_FCNT_DOWN_3;
break;
default:
return LORAMAC_CRYPTO_FAIL_FCNT_ID;
}
return LoRaMacCryptoGetFCntDown( *fCntID, maxFCntGap, macMsg->FHDR.FCnt, currentDown );
}
static LoRaMacStatus_t SwitchClass( DeviceClass_t deviceClass )
{
LoRaMacStatus_t status = LORAMAC_STATUS_PARAMETER_INVALID;
switch( MacCtx.NvmCtx->DeviceClass )
{
case CLASS_A:
{
if( deviceClass == CLASS_A )
{
// Revert back RxC parameters
MacCtx.NvmCtx->MacParams.RxCChannel = MacCtx.NvmCtx->MacParams.Rx2Channel;
}
if( deviceClass == CLASS_B )
{
status = LoRaMacClassBSwitchClass( deviceClass );
if( status == LORAMAC_STATUS_OK )
{
MacCtx.NvmCtx->DeviceClass = deviceClass;
}
}
if( deviceClass == CLASS_C )
{
MacCtx.NvmCtx->DeviceClass = deviceClass;
MacCtx.RxWindowCConfig = MacCtx.RxWindow2Config;
MacCtx.RxWindowCConfig.RxSlot = RX_SLOT_WIN_CLASS_C;
for( int8_t i = 0; i < LORAMAC_MAX_MC_CTX; i++ )
{
if( MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.IsEnabled == true )
// TODO: Check multicast channel device class.
{
MacCtx.NvmCtx->MacParams.RxCChannel.Frequency = MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.RxParams.ClassC.Frequency;
MacCtx.NvmCtx->MacParams.RxCChannel.Datarate = MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.RxParams.ClassC.Datarate;
MacCtx.RxWindowCConfig.Channel = MacCtx.Channel;
MacCtx.RxWindowCConfig.Frequency = MacCtx.NvmCtx->MacParams.RxCChannel.Frequency;
MacCtx.RxWindowCConfig.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
MacCtx.RxWindowCConfig.RxSlot = RX_SLOT_WIN_CLASS_C_MULTICAST;
MacCtx.RxWindowCConfig.RxContinuous = true;
break;
}
}
// Set the NodeAckRequested indicator to default
MacCtx.NodeAckRequested = false;
// Set the radio into sleep mode in case we are still in RX mode
Radio.Sleep( );
OpenContinuousRxCWindow( );
status = LORAMAC_STATUS_OK;
}
break;
}
case CLASS_B:
{
status = LoRaMacClassBSwitchClass( deviceClass );
if( status == LORAMAC_STATUS_OK )
{
MacCtx.NvmCtx->DeviceClass = deviceClass;
}
break;
}
case CLASS_C:
{
if( deviceClass == CLASS_A )
{
MacCtx.NvmCtx->DeviceClass = deviceClass;
// Set the radio into sleep to setup a defined state
Radio.Sleep( );
status = LORAMAC_STATUS_OK;
}
break;
}
}
return status;
}
static uint8_t GetMaxAppPayloadWithoutFOptsLength( int8_t datarate )
{
GetPhyParams_t getPhy;
PhyParam_t phyParam;
// Setup PHY request
getPhy.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
getPhy.Datarate = datarate;
getPhy.Attribute = PHY_MAX_PAYLOAD;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
return phyParam.Value;
}
static bool ValidatePayloadLength( uint8_t lenN, int8_t datarate, uint8_t fOptsLen )
{
uint16_t maxN = 0;
uint16_t payloadSize = 0;
maxN = GetMaxAppPayloadWithoutFOptsLength( datarate );
// Calculate the resulting payload size
payloadSize = ( lenN + fOptsLen );
// Validation of the application payload size
if( ( payloadSize <= maxN ) && ( payloadSize <= LORAMAC_PHY_MAXPAYLOAD ) )
{
return true;
}
return false;
}
static void SetMlmeScheduleUplinkIndication( void )
{
MacCtx.MacFlags.Bits.MlmeSchedUplinkInd = 1;
}
static void ProcessMacCommands( uint8_t *payload, uint8_t macIndex, uint8_t commandsSize, int8_t snr, LoRaMacRxSlot_t rxSlot )
{
uint8_t status = 0;
bool adrBlockFound = false;
uint8_t macCmdPayload[2] = { 0x00, 0x00 };
while( macIndex < commandsSize )
{
// Make sure to parse only complete MAC commands
if( ( LoRaMacCommandsGetCmdSize( payload[macIndex] ) + macIndex ) > commandsSize )
{
return;
}
// Decode Frame MAC commands
switch( payload[macIndex++] )
{
case SRV_MAC_LINK_CHECK_ANS:
{
if( LoRaMacConfirmQueueIsCmdActive( MLME_LINK_CHECK ) == true )
{
LoRaMacConfirmQueueSetStatus( LORAMAC_EVENT_INFO_STATUS_OK, MLME_LINK_CHECK );
MacCtx.MlmeConfirm.DemodMargin = payload[macIndex++];
MacCtx.MlmeConfirm.NbGateways = payload[macIndex++];
}
break;
}
case SRV_MAC_LINK_ADR_REQ:
{
LinkAdrReqParams_t linkAdrReq;
int8_t linkAdrDatarate = DR_0;
int8_t linkAdrTxPower = TX_POWER_0;
uint8_t linkAdrNbRep = 0;
uint8_t linkAdrNbBytesParsed = 0;
if( adrBlockFound == false )
{
adrBlockFound = true;
// Fill parameter structure
linkAdrReq.Payload = &payload[macIndex - 1];
linkAdrReq.PayloadSize = commandsSize - ( macIndex - 1 );
linkAdrReq.AdrEnabled = MacCtx.NvmCtx->AdrCtrlOn;
linkAdrReq.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
linkAdrReq.CurrentDatarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
linkAdrReq.CurrentTxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
linkAdrReq.CurrentNbRep = MacCtx.NvmCtx->MacParams.ChannelsNbTrans;
linkAdrReq.Version = MacCtx.NvmCtx->Version;
// Process the ADR requests
status = RegionLinkAdrReq( MacCtx.NvmCtx->Region, &linkAdrReq, &linkAdrDatarate,
&linkAdrTxPower, &linkAdrNbRep, &linkAdrNbBytesParsed );
if( ( status & 0x07 ) == 0x07 )
{
MacCtx.NvmCtx->MacParams.ChannelsDatarate = linkAdrDatarate;
MacCtx.NvmCtx->MacParams.ChannelsTxPower = linkAdrTxPower;
MacCtx.NvmCtx->MacParams.ChannelsNbTrans = linkAdrNbRep;
}
// Add the answers to the buffer
for( uint8_t i = 0; i < ( linkAdrNbBytesParsed / 5 ); i++ )
{
LoRaMacCommandsAddCmd( MOTE_MAC_LINK_ADR_ANS, &status, 1 );
}
// Update MAC index
macIndex += linkAdrNbBytesParsed - 1;
}
break;
}
case SRV_MAC_DUTY_CYCLE_REQ:
{
MacCtx.NvmCtx->MaxDCycle = payload[macIndex++] & 0x0F;
MacCtx.NvmCtx->AggregatedDCycle = 1 << MacCtx.NvmCtx->MaxDCycle;
LoRaMacCommandsAddCmd( MOTE_MAC_DUTY_CYCLE_ANS, macCmdPayload, 0 );
break;
}
case SRV_MAC_RX_PARAM_SETUP_REQ:
{
RxParamSetupReqParams_t rxParamSetupReq;
status = 0x07;
rxParamSetupReq.DrOffset = ( payload[macIndex] >> 4 ) & 0x07;
rxParamSetupReq.Datarate = payload[macIndex] & 0x0F;
macIndex++;
rxParamSetupReq.Frequency = ( uint32_t ) payload[macIndex++];
rxParamSetupReq.Frequency |= ( uint32_t ) payload[macIndex++] << 8;
rxParamSetupReq.Frequency |= ( uint32_t ) payload[macIndex++] << 16;
rxParamSetupReq.Frequency *= 100;
// Perform request on region
status = RegionRxParamSetupReq( MacCtx.NvmCtx->Region, &rxParamSetupReq );
if( ( status & 0x07 ) == 0x07 )
{
MacCtx.NvmCtx->MacParams.Rx2Channel.Datarate = rxParamSetupReq.Datarate;
MacCtx.NvmCtx->MacParams.RxCChannel.Datarate = rxParamSetupReq.Datarate;
MacCtx.NvmCtx->MacParams.Rx2Channel.Frequency = rxParamSetupReq.Frequency;
MacCtx.NvmCtx->MacParams.RxCChannel.Frequency = rxParamSetupReq.Frequency;
MacCtx.NvmCtx->MacParams.Rx1DrOffset = rxParamSetupReq.DrOffset;
}
macCmdPayload[0] = status;
LoRaMacCommandsAddCmd( MOTE_MAC_RX_PARAM_SETUP_ANS, macCmdPayload, 1 );
// Setup indication to inform the application
SetMlmeScheduleUplinkIndication( );
break;
}
case SRV_MAC_DEV_STATUS_REQ:
{
uint8_t batteryLevel = BAT_LEVEL_NO_MEASURE;
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->GetBatteryLevel != NULL ) )
{
batteryLevel = MacCtx.MacCallbacks->GetBatteryLevel( );
}
macCmdPayload[0] = batteryLevel;
macCmdPayload[1] = ( uint8_t )( snr & 0x3F );
LoRaMacCommandsAddCmd( MOTE_MAC_DEV_STATUS_ANS, macCmdPayload, 2 );
break;
}
case SRV_MAC_NEW_CHANNEL_REQ:
{
NewChannelReqParams_t newChannelReq;
ChannelParams_t chParam;
status = 0x03;
newChannelReq.ChannelId = payload[macIndex++];
newChannelReq.NewChannel = &chParam;
chParam.Frequency = ( uint32_t ) payload[macIndex++];
chParam.Frequency |= ( uint32_t ) payload[macIndex++] << 8;
chParam.Frequency |= ( uint32_t ) payload[macIndex++] << 16;
chParam.Frequency *= 100;
chParam.Rx1Frequency = 0;
chParam.DrRange.Value = payload[macIndex++];
status = RegionNewChannelReq( MacCtx.NvmCtx->Region, &newChannelReq );
macCmdPayload[0] = status;
LoRaMacCommandsAddCmd( MOTE_MAC_NEW_CHANNEL_ANS, macCmdPayload, 1 );
break;
}
case SRV_MAC_RX_TIMING_SETUP_REQ:
{
uint8_t delay = payload[macIndex++] & 0x0F;
if( delay == 0 )
{
delay++;
}
MacCtx.NvmCtx->MacParams.ReceiveDelay1 = delay * 1000;
MacCtx.NvmCtx->MacParams.ReceiveDelay2 = MacCtx.NvmCtx->MacParams.ReceiveDelay1 + 1000;
LoRaMacCommandsAddCmd( MOTE_MAC_RX_TIMING_SETUP_ANS, macCmdPayload, 0 );
// Setup indication to inform the application
SetMlmeScheduleUplinkIndication( );
break;
}
case SRV_MAC_TX_PARAM_SETUP_REQ:
{
TxParamSetupReqParams_t txParamSetupReq;
GetPhyParams_t getPhy;
PhyParam_t phyParam;
uint8_t eirpDwellTime = payload[macIndex++];
txParamSetupReq.UplinkDwellTime = 0;
txParamSetupReq.DownlinkDwellTime = 0;
if( ( eirpDwellTime & 0x20 ) == 0x20 )
{
txParamSetupReq.DownlinkDwellTime = 1;
}
if( ( eirpDwellTime & 0x10 ) == 0x10 )
{
txParamSetupReq.UplinkDwellTime = 1;
}
txParamSetupReq.MaxEirp = eirpDwellTime & 0x0F;
// Check the status for correctness
if( RegionTxParamSetupReq( MacCtx.NvmCtx->Region, &txParamSetupReq ) != -1 )
{
// Accept command
MacCtx.NvmCtx->MacParams.UplinkDwellTime = txParamSetupReq.UplinkDwellTime;
MacCtx.NvmCtx->MacParams.DownlinkDwellTime = txParamSetupReq.DownlinkDwellTime;
MacCtx.NvmCtx->MacParams.MaxEirp = LoRaMacMaxEirpTable[txParamSetupReq.MaxEirp];
// Update the datarate in case of the new configuration limits it
getPhy.Attribute = PHY_MIN_TX_DR;
getPhy.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParams.ChannelsDatarate = MAX( MacCtx.NvmCtx->MacParams.ChannelsDatarate, ( int8_t )phyParam.Value );
// Add command response
LoRaMacCommandsAddCmd( MOTE_MAC_TX_PARAM_SETUP_ANS, macCmdPayload, 0 );
}
break;
}
case SRV_MAC_DL_CHANNEL_REQ:
{
DlChannelReqParams_t dlChannelReq;
status = 0x03;
dlChannelReq.ChannelId = payload[macIndex++];
dlChannelReq.Rx1Frequency = ( uint32_t ) payload[macIndex++];
dlChannelReq.Rx1Frequency |= ( uint32_t ) payload[macIndex++] << 8;
dlChannelReq.Rx1Frequency |= ( uint32_t ) payload[macIndex++] << 16;
dlChannelReq.Rx1Frequency *= 100;
status = RegionDlChannelReq( MacCtx.NvmCtx->Region, &dlChannelReq );
macCmdPayload[0] = status;
LoRaMacCommandsAddCmd( MOTE_MAC_DL_CHANNEL_ANS, macCmdPayload, 1 );
// Setup indication to inform the application
SetMlmeScheduleUplinkIndication( );
break;
}
case SRV_MAC_DEVICE_TIME_ANS:
{
SysTime_t gpsEpochTime = { 0 };
SysTime_t sysTime = { 0 };
SysTime_t sysTimeCurrent = { 0 };
gpsEpochTime.Seconds = ( uint32_t )payload[macIndex++];
gpsEpochTime.Seconds |= ( uint32_t )payload[macIndex++] << 8;
gpsEpochTime.Seconds |= ( uint32_t )payload[macIndex++] << 16;
gpsEpochTime.Seconds |= ( uint32_t )payload[macIndex++] << 24;
gpsEpochTime.SubSeconds = payload[macIndex++];
// Convert the fractional second received in ms
// round( pow( 0.5, 8.0 ) * 1000 ) = 3.90625
gpsEpochTime.SubSeconds = ( int16_t )( ( ( int32_t )gpsEpochTime.SubSeconds * 1000 ) >> 8 );
// Copy received GPS Epoch time into system time
sysTime = gpsEpochTime;
// Add Unix to Gps epcoh offset. The system time is based on Unix time.
sysTime.Seconds += UNIX_GPS_EPOCH_OFFSET;
// Compensate time difference between Tx Done time and now
sysTimeCurrent = SysTimeGet( );
sysTime = SysTimeAdd( sysTimeCurrent, SysTimeSub( sysTime, MacCtx.LastTxSysTime ) );
// Apply the new system time.
SysTimeSet( sysTime );
LoRaMacClassBDeviceTimeAns( );
MacCtx.McpsIndication.DeviceTimeAnsReceived = true;
break;
}
case SRV_MAC_PING_SLOT_INFO_ANS:
{
// According to the specification, it is not allowed to process this answer in
// a ping or multicast slot
if( ( MacCtx.RxSlot != RX_SLOT_WIN_CLASS_B_PING_SLOT ) && ( MacCtx.RxSlot != RX_SLOT_WIN_CLASS_B_MULTICAST_SLOT ) )
{
LoRaMacClassBPingSlotInfoAns( );
}
break;
}
case SRV_MAC_PING_SLOT_CHANNEL_REQ:
{
uint8_t status = 0x03;
uint32_t frequency = 0;
uint8_t datarate;
frequency = ( uint32_t )payload[macIndex++];
frequency |= ( uint32_t )payload[macIndex++] << 8;
frequency |= ( uint32_t )payload[macIndex++] << 16;
frequency *= 100;
datarate = payload[macIndex++] & 0x0F;
status = LoRaMacClassBPingSlotChannelReq( datarate, frequency );
macCmdPayload[0] = status;
LoRaMacCommandsAddCmd( MOTE_MAC_PING_SLOT_FREQ_ANS, macCmdPayload, 1 );
break;
}
case SRV_MAC_BEACON_TIMING_ANS:
{
uint16_t beaconTimingDelay = 0;
uint8_t beaconTimingChannel = 0;
beaconTimingDelay = ( uint16_t )payload[macIndex++];
beaconTimingDelay |= ( uint16_t )payload[macIndex++] << 8;
beaconTimingChannel = payload[macIndex++];
LoRaMacClassBBeaconTimingAns( beaconTimingDelay, beaconTimingChannel, RxDoneParams.LastRxDone );
break;
}
case SRV_MAC_BEACON_FREQ_REQ:
{
uint32_t frequency = 0;
frequency = ( uint32_t )payload[macIndex++];
frequency |= ( uint32_t )payload[macIndex++] << 8;
frequency |= ( uint32_t )payload[macIndex++] << 16;
frequency *= 100;
if( LoRaMacClassBBeaconFreqReq( frequency ) == true )
{
macCmdPayload[0] = 1;
}
else
{
macCmdPayload[0] = 0;
}
LoRaMacCommandsAddCmd( MOTE_MAC_BEACON_FREQ_ANS, macCmdPayload, 1 );
}
break;
default:
// Unknown command. ABORT MAC commands processing
return;
}
}
}
LoRaMacStatus_t Send( LoRaMacHeader_t* macHdr, uint8_t fPort, void* fBuffer, uint16_t fBufferSize )
{
LoRaMacFrameCtrl_t fCtrl;
LoRaMacStatus_t status = LORAMAC_STATUS_PARAMETER_INVALID;
int8_t datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
int8_t txPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
uint32_t adrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
CalcNextAdrParams_t adrNext;
// Check if we are joined
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
return LORAMAC_STATUS_NO_NETWORK_JOINED;
}
if( MacCtx.NvmCtx->MaxDCycle == 0 )
{
MacCtx.NvmCtx->AggregatedTimeOff = 0;
}
fCtrl.Value = 0;
fCtrl.Bits.FOptsLen = 0;
fCtrl.Bits.Adr = MacCtx.NvmCtx->AdrCtrlOn;
// Check class b
if( MacCtx.NvmCtx->DeviceClass == CLASS_B )
{
fCtrl.Bits.FPending = 1;
}
else
{
fCtrl.Bits.FPending = 0;
}
// Check server ack
if( MacCtx.NvmCtx->SrvAckRequested == true )
{
fCtrl.Bits.Ack = 1;
}
// ADR next request
adrNext.Version = MacCtx.NvmCtx->Version;
adrNext.UpdateChanMask = true;
adrNext.AdrEnabled = fCtrl.Bits.Adr;
adrNext.AdrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
adrNext.AdrAckLimit = MacCtx.AdrAckLimit;
adrNext.AdrAckDelay = MacCtx.AdrAckDelay;
adrNext.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
adrNext.TxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
adrNext.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
adrNext.Region = MacCtx.NvmCtx->Region;
fCtrl.Bits.AdrAckReq = LoRaMacAdrCalcNext( &adrNext, &MacCtx.NvmCtx->MacParams.ChannelsDatarate,
&MacCtx.NvmCtx->MacParams.ChannelsTxPower, &adrAckCounter );
// Prepare the frame
status = PrepareFrame( macHdr, &fCtrl, fPort, fBuffer, fBufferSize );
// Validate status
if( ( status == LORAMAC_STATUS_OK ) || ( status == LORAMAC_STATUS_SKIPPED_APP_DATA ) )
{
// Schedule frame, do not allow delayed transmissions
status = ScheduleTx( false );
}
// Post processing
if( status != LORAMAC_STATUS_OK )
{
// Bad case - restore
// Store local variables
MacCtx.NvmCtx->MacParams.ChannelsDatarate = datarate;
MacCtx.NvmCtx->MacParams.ChannelsTxPower = txPower;
}
else
{
// Good case
MacCtx.NvmCtx->SrvAckRequested = false;
MacCtx.NvmCtx->AdrAckCounter = adrAckCounter;
// Remove all none sticky MAC commands
if( LoRaMacCommandsRemoveNoneStickyCmds( ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
}
return status;
}
LoRaMacStatus_t SendReJoinReq( JoinReqIdentifier_t joinReqType )
{
LoRaMacStatus_t status = LORAMAC_STATUS_OK;
LoRaMacHeader_t macHdr;
macHdr.Value = 0;
bool allowDelayedTx = true;
// Setup join/rejoin message
switch( joinReqType )
{
case JOIN_REQ:
{
SwitchClass( CLASS_A );
MacCtx.TxMsg.Type = LORAMAC_MSG_TYPE_JOIN_REQUEST;
MacCtx.TxMsg.Message.JoinReq.Buffer = MacCtx.PktBuffer;
MacCtx.TxMsg.Message.JoinReq.BufSize = LORAMAC_PHY_MAXPAYLOAD;
macHdr.Bits.MType = FRAME_TYPE_JOIN_REQ;
MacCtx.TxMsg.Message.JoinReq.MHDR.Value = macHdr.Value;
memcpy1( MacCtx.TxMsg.Message.JoinReq.JoinEUI, SecureElementGetJoinEui( ), LORAMAC_JOIN_EUI_FIELD_SIZE );
memcpy1( MacCtx.TxMsg.Message.JoinReq.DevEUI, SecureElementGetDevEui( ), LORAMAC_DEV_EUI_FIELD_SIZE );
allowDelayedTx = false;
break;
}
default:
status = LORAMAC_STATUS_SERVICE_UNKNOWN;
break;
}
// Schedule frame
status = ScheduleTx( allowDelayedTx );
return status;
}
static LoRaMacStatus_t CheckForClassBCollision( void )
{
if( LoRaMacClassBIsBeaconExpected( ) == true )
{
return LORAMAC_STATUS_BUSY_BEACON_RESERVED_TIME;
}
if( MacCtx.NvmCtx->DeviceClass == CLASS_B )
{
if( LoRaMacClassBIsPingExpected( ) == true )
{
return LORAMAC_STATUS_BUSY_PING_SLOT_WINDOW_TIME;
}
else if( LoRaMacClassBIsMulticastExpected( ) == true )
{
return LORAMAC_STATUS_BUSY_PING_SLOT_WINDOW_TIME;
}
}
return LORAMAC_STATUS_OK;
}
static LoRaMacStatus_t ScheduleTx( bool allowDelayedTx )
{
LoRaMacStatus_t status = LORAMAC_STATUS_PARAMETER_INVALID;
TimerTime_t dutyCycleTimeOff = 0;
NextChanParams_t nextChan;
size_t macCmdsSize = 0;
// Check class b collisions
status = CheckForClassBCollision( );
if( status != LORAMAC_STATUS_OK )
{
return status;
}
// Update back-off
CalculateBackOff( MacCtx.NvmCtx->LastTxChannel );
nextChan.AggrTimeOff = MacCtx.NvmCtx->AggregatedTimeOff;
nextChan.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
nextChan.DutyCycleEnabled = MacCtx.NvmCtx->DutyCycleOn;
nextChan.QueryNextTxDelayOnly = false;
nextChan.Joined = false;
if( MacCtx.NvmCtx->NetworkActivation != ACTIVATION_TYPE_NONE )
{
nextChan.Joined = true;
}
nextChan.LastAggrTx = MacCtx.NvmCtx->LastTxDoneTime;
// Select channel
status = RegionNextChannel( MacCtx.NvmCtx->Region, &nextChan, &MacCtx.Channel, &dutyCycleTimeOff, &MacCtx.NvmCtx->AggregatedTimeOff );
if( status != LORAMAC_STATUS_OK )
{
if( ( status == LORAMAC_STATUS_DUTYCYCLE_RESTRICTED ) &&
( allowDelayedTx == true ) )
{
// Allow delayed transmissions. We have to allow it in case
// the MAC must retransmit a frame with the frame repetitions
if( dutyCycleTimeOff != 0 )
{// Send later - prepare timer
MacCtx.MacState |= LORAMAC_TX_DELAYED;
TimerSetValue( &MacCtx.TxDelayedTimer, dutyCycleTimeOff );
TimerStart( &MacCtx.TxDelayedTimer );
}
return LORAMAC_STATUS_OK;
}
else
{// State where the MAC cannot send a frame
return status;
}
}
// Compute Rx1 windows parameters
RegionComputeRxWindowParameters( MacCtx.NvmCtx->Region,
RegionApplyDrOffset( MacCtx.NvmCtx->Region, MacCtx.NvmCtx->MacParams.DownlinkDwellTime, MacCtx.NvmCtx->MacParams.ChannelsDatarate, MacCtx.NvmCtx->MacParams.Rx1DrOffset ),
MacCtx.NvmCtx->MacParams.MinRxSymbols,
MacCtx.NvmCtx->MacParams.SystemMaxRxError,
&MacCtx.RxWindow1Config );
// Compute Rx2 windows parameters
RegionComputeRxWindowParameters( MacCtx.NvmCtx->Region,
MacCtx.NvmCtx->MacParams.Rx2Channel.Datarate,
MacCtx.NvmCtx->MacParams.MinRxSymbols,
MacCtx.NvmCtx->MacParams.SystemMaxRxError,
&MacCtx.RxWindow2Config );
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
MacCtx.RxWindow1Delay = MacCtx.NvmCtx->MacParams.JoinAcceptDelay1 + MacCtx.RxWindow1Config.WindowOffset;
MacCtx.RxWindow2Delay = MacCtx.NvmCtx->MacParams.JoinAcceptDelay2 + MacCtx.RxWindow2Config.WindowOffset;
}
else
{
if( LoRaMacCommandsGetSizeSerializedCmds( &macCmdsSize ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
if( ValidatePayloadLength( MacCtx.AppDataSize, MacCtx.NvmCtx->MacParams.ChannelsDatarate, macCmdsSize ) == false )
{
return LORAMAC_STATUS_LENGTH_ERROR;
}
MacCtx.RxWindow1Delay = MacCtx.NvmCtx->MacParams.ReceiveDelay1 + MacCtx.RxWindow1Config.WindowOffset;
MacCtx.RxWindow2Delay = MacCtx.NvmCtx->MacParams.ReceiveDelay2 + MacCtx.RxWindow2Config.WindowOffset;
}
// Secure frame
LoRaMacStatus_t retval = SecureFrame( MacCtx.NvmCtx->MacParams.ChannelsDatarate, MacCtx.Channel );
if( retval != LORAMAC_STATUS_OK )
{
return retval;
}
// Try to send now
return SendFrameOnChannel( MacCtx.Channel );
}
static LoRaMacStatus_t SecureFrame( uint8_t txDr, uint8_t txCh )
{
LoRaMacCryptoStatus_t macCryptoStatus = LORAMAC_CRYPTO_ERROR;
uint32_t fCntUp = 0;
switch( MacCtx.TxMsg.Type )
{
case LORAMAC_MSG_TYPE_JOIN_REQUEST:
macCryptoStatus = LoRaMacCryptoPrepareJoinRequest( &MacCtx.TxMsg.Message.JoinReq );
if( LORAMAC_CRYPTO_SUCCESS != macCryptoStatus )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
MacCtx.PktBufferLen = MacCtx.TxMsg.Message.JoinReq.BufSize;
break;
case LORAMAC_MSG_TYPE_DATA:
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoGetFCntUp( &fCntUp ) )
{
return LORAMAC_STATUS_FCNT_HANDLER_ERROR;
}
if( ( MacCtx.ChannelsNbTransCounter >= 1 ) || ( MacCtx.AckTimeoutRetriesCounter > 1 ) )
{
fCntUp -= 1;
}
macCryptoStatus = LoRaMacCryptoSecureMessage( fCntUp, txDr, txCh, &MacCtx.TxMsg.Message.Data );
if( LORAMAC_CRYPTO_SUCCESS != macCryptoStatus )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
MacCtx.PktBufferLen = MacCtx.TxMsg.Message.Data.BufSize;
break;
case LORAMAC_MSG_TYPE_JOIN_ACCEPT:
case LORAMAC_MSG_TYPE_UNDEF:
default:
return LORAMAC_STATUS_PARAMETER_INVALID;
}
return LORAMAC_STATUS_OK;
}
static void CalculateBackOff( uint8_t channel )
{
CalcBackOffParams_t calcBackOff;
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
calcBackOff.Joined = false;
}
else
{
calcBackOff.Joined = true;
}
calcBackOff.DutyCycleEnabled = MacCtx.NvmCtx->DutyCycleOn;
calcBackOff.Channel = channel;
calcBackOff.ElapsedTime = SysTimeSub( SysTimeGetMcuTime( ), MacCtx.NvmCtx->InitializationTime );
calcBackOff.TxTimeOnAir = MacCtx.TxTimeOnAir;
calcBackOff.LastTxIsJoinRequest = false;
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
calcBackOff.LastTxIsJoinRequest = true;
}
// Update regional back-off
RegionCalcBackOff( MacCtx.NvmCtx->Region, &calcBackOff );
// Update aggregated time-off. This must be an assignment and no incremental
// update as we do only calculate the time-off based on the last transmission
MacCtx.NvmCtx->AggregatedTimeOff = ( MacCtx.TxTimeOnAir * MacCtx.NvmCtx->AggregatedDCycle - MacCtx.TxTimeOnAir );
}
static void RemoveMacCommands( LoRaMacRxSlot_t rxSlot, LoRaMacFrameCtrl_t fCtrl, Mcps_t request )
{
if( rxSlot == RX_SLOT_WIN_1 || rxSlot == RX_SLOT_WIN_2 )
{
// Remove all sticky MAC commands answers since we can assume
// that they have been received by the server.
if( request == MCPS_CONFIRMED )
{
if( fCtrl.Bits.Ack == 1 )
{ // For confirmed uplinks only if we have received an ACK.
LoRaMacCommandsRemoveStickyAnsCmds( );
}
}
else
{
LoRaMacCommandsRemoveStickyAnsCmds( );
}
}
}
static void ResetMacParameters( void )
{
MacCtx.NvmCtx->NetworkActivation = ACTIVATION_TYPE_NONE;
// ADR counter
MacCtx.NvmCtx->AdrAckCounter = 0;
MacCtx.ChannelsNbTransCounter = 0;
MacCtx.AckTimeoutRetries = 1;
MacCtx.AckTimeoutRetriesCounter = 1;
MacCtx.AckTimeoutRetry = false;
MacCtx.NvmCtx->MaxDCycle = 0;
MacCtx.NvmCtx->AggregatedDCycle = 1;
MacCtx.NvmCtx->MacParams.ChannelsTxPower = MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower;
MacCtx.NvmCtx->MacParams.ChannelsDatarate = MacCtx.NvmCtx->MacParamsDefaults.ChannelsDatarate;
MacCtx.NvmCtx->MacParams.Rx1DrOffset = MacCtx.NvmCtx->MacParamsDefaults.Rx1DrOffset;
MacCtx.NvmCtx->MacParams.Rx2Channel = MacCtx.NvmCtx->MacParamsDefaults.Rx2Channel;
MacCtx.NvmCtx->MacParams.RxCChannel = MacCtx.NvmCtx->MacParamsDefaults.RxCChannel;
MacCtx.NvmCtx->MacParams.UplinkDwellTime = MacCtx.NvmCtx->MacParamsDefaults.UplinkDwellTime;
MacCtx.NvmCtx->MacParams.DownlinkDwellTime = MacCtx.NvmCtx->MacParamsDefaults.DownlinkDwellTime;
MacCtx.NvmCtx->MacParams.MaxEirp = MacCtx.NvmCtx->MacParamsDefaults.MaxEirp;
MacCtx.NvmCtx->MacParams.AntennaGain = MacCtx.NvmCtx->MacParamsDefaults.AntennaGain;
MacCtx.NodeAckRequested = false;
MacCtx.NvmCtx->SrvAckRequested = false;
// Reset to application defaults
InitDefaultsParams_t params;
params.Type = INIT_TYPE_INIT;
params.NvmCtx = NULL;
RegionInitDefaults( MacCtx.NvmCtx->Region, ¶ms );
// Initialize channel index.
MacCtx.Channel = 0;
MacCtx.NvmCtx->LastTxChannel = MacCtx.Channel;
// Initialize Rx2 config parameters.
MacCtx.RxWindow2Config.Channel = MacCtx.Channel;
MacCtx.RxWindow2Config.Frequency = MacCtx.NvmCtx->MacParams.Rx2Channel.Frequency;
MacCtx.RxWindow2Config.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
MacCtx.RxWindow2Config.RxContinuous = false;
MacCtx.RxWindow2Config.RxSlot = RX_SLOT_WIN_2;
// Initialize RxC config parameters.
MacCtx.RxWindowCConfig = MacCtx.RxWindow2Config;
MacCtx.RxWindowCConfig.RxContinuous = true;
MacCtx.RxWindowCConfig.RxSlot = RX_SLOT_WIN_CLASS_C;
}
/*!
* \brief Initializes and opens the reception window
*
* \param [IN] rxTimer Window timer to be topped.
* \param [IN] rxConfig Window parameters to be setup
*/
static void RxWindowSetup( TimerEvent_t* rxTimer, RxConfigParams_t* rxConfig )
{
TimerStop( rxTimer );
// Ensure the radio is Idle
Radio.Standby( );
if( RegionRxConfig( MacCtx.NvmCtx->Region, rxConfig, ( int8_t* )&MacCtx.McpsIndication.RxDatarate ) == true )
{
Radio.Rx( MacCtx.NvmCtx->MacParams.MaxRxWindow );
MacCtx.RxSlot = rxConfig->RxSlot;
}
}
static void OpenContinuousRxCWindow( void )
{
// Compute RxC windows parameters
RegionComputeRxWindowParameters( MacCtx.NvmCtx->Region,
MacCtx.NvmCtx->MacParams.RxCChannel.Datarate,
MacCtx.NvmCtx->MacParams.MinRxSymbols,
MacCtx.NvmCtx->MacParams.SystemMaxRxError,
&MacCtx.RxWindowCConfig );
MacCtx.RxWindowCConfig.RxSlot = RX_SLOT_WIN_CLASS_C;
// Setup continuous listening
MacCtx.RxWindowCConfig.RxContinuous = true;
// At this point the Radio should be idle.
// Thus, there is no need to set the radio in standby mode.
if( RegionRxConfig( MacCtx.NvmCtx->Region, &MacCtx.RxWindowCConfig, ( int8_t* )&MacCtx.McpsIndication.RxDatarate ) == true )
{
Radio.Rx( 0 ); // Continuous mode
MacCtx.RxSlot = MacCtx.RxWindowCConfig.RxSlot;
}
}
LoRaMacStatus_t PrepareFrame( LoRaMacHeader_t* macHdr, LoRaMacFrameCtrl_t* fCtrl, uint8_t fPort, void* fBuffer, uint16_t fBufferSize )
{
MacCtx.PktBufferLen = 0;
MacCtx.NodeAckRequested = false;
uint32_t fCntUp = 0;
size_t macCmdsSize = 0;
uint8_t availableSize = 0;
if( fBuffer == NULL )
{
fBufferSize = 0;
}
memcpy1( MacCtx.AppData, ( uint8_t* ) fBuffer, fBufferSize );
MacCtx.AppDataSize = fBufferSize;
MacCtx.PktBuffer[0] = macHdr->Value;
switch( macHdr->Bits.MType )
{
case FRAME_TYPE_DATA_CONFIRMED_UP:
MacCtx.NodeAckRequested = true;
// Intentional fall through
case FRAME_TYPE_DATA_UNCONFIRMED_UP:
MacCtx.TxMsg.Type = LORAMAC_MSG_TYPE_DATA;
MacCtx.TxMsg.Message.Data.Buffer = MacCtx.PktBuffer;
MacCtx.TxMsg.Message.Data.BufSize = LORAMAC_PHY_MAXPAYLOAD;
MacCtx.TxMsg.Message.Data.MHDR.Value = macHdr->Value;
MacCtx.TxMsg.Message.Data.FPort = fPort;
MacCtx.TxMsg.Message.Data.FHDR.DevAddr = MacCtx.NvmCtx->DevAddr;
MacCtx.TxMsg.Message.Data.FHDR.FCtrl.Value = fCtrl->Value;
MacCtx.TxMsg.Message.Data.FRMPayloadSize = MacCtx.AppDataSize;
MacCtx.TxMsg.Message.Data.FRMPayload = MacCtx.AppData;
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoGetFCntUp( &fCntUp ) )
{
return LORAMAC_STATUS_FCNT_HANDLER_ERROR;
}
MacCtx.TxMsg.Message.Data.FHDR.FCnt = ( uint16_t )fCntUp;
// Reset confirm parameters
MacCtx.McpsConfirm.NbRetries = 0;
MacCtx.McpsConfirm.AckReceived = false;
MacCtx.McpsConfirm.UpLinkCounter = fCntUp;
// Handle the MAC commands if there are any available
if( LoRaMacCommandsGetSizeSerializedCmds( &macCmdsSize ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
if( macCmdsSize > 0 )
{
availableSize = GetMaxAppPayloadWithoutFOptsLength( MacCtx.NvmCtx->MacParams.ChannelsDatarate );
// There is application payload available and the MAC commands fit into FOpts field.
if( ( MacCtx.AppDataSize > 0 ) && ( macCmdsSize <= LORA_MAC_COMMAND_MAX_FOPTS_LENGTH ) )
{
if( LoRaMacCommandsSerializeCmds( LORA_MAC_COMMAND_MAX_FOPTS_LENGTH, &macCmdsSize, MacCtx.TxMsg.Message.Data.FHDR.FOpts ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
fCtrl->Bits.FOptsLen = macCmdsSize;
// Update FCtrl field with new value of FOptionsLength
MacCtx.TxMsg.Message.Data.FHDR.FCtrl.Value = fCtrl->Value;
}
// There is application payload available but the MAC commands does NOT fit into FOpts field.
else if( ( MacCtx.AppDataSize > 0 ) && ( macCmdsSize > LORA_MAC_COMMAND_MAX_FOPTS_LENGTH ) )
{
if( LoRaMacCommandsSerializeCmds( availableSize, &macCmdsSize, MacCtx.NvmCtx->MacCommandsBuffer ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
return LORAMAC_STATUS_SKIPPED_APP_DATA;
}
// No application payload available therefore add all mac commands to the FRMPayload.
else
{
if( LoRaMacCommandsSerializeCmds( availableSize, &macCmdsSize, MacCtx.NvmCtx->MacCommandsBuffer ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
// Force FPort to be zero
MacCtx.TxMsg.Message.Data.FPort = 0;
MacCtx.TxMsg.Message.Data.FRMPayload = MacCtx.NvmCtx->MacCommandsBuffer;
MacCtx.TxMsg.Message.Data.FRMPayloadSize = macCmdsSize;
}
}
break;
case FRAME_TYPE_PROPRIETARY:
if( ( fBuffer != NULL ) && ( MacCtx.AppDataSize > 0 ) )
{
memcpy1( MacCtx.PktBuffer + LORAMAC_MHDR_FIELD_SIZE, ( uint8_t* ) fBuffer, MacCtx.AppDataSize );
MacCtx.PktBufferLen = LORAMAC_MHDR_FIELD_SIZE + MacCtx.AppDataSize;
}
break;
default:
return LORAMAC_STATUS_SERVICE_UNKNOWN;
}
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t SendFrameOnChannel( uint8_t channel )
{
TxConfigParams_t txConfig;
int8_t txPower = 0;
txConfig.Channel = channel;
txConfig.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
txConfig.TxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
txConfig.MaxEirp = MacCtx.NvmCtx->MacParams.MaxEirp;
txConfig.AntennaGain = MacCtx.NvmCtx->MacParams.AntennaGain;
txConfig.PktLen = MacCtx.PktBufferLen;
RegionTxConfig( MacCtx.NvmCtx->Region, &txConfig, &txPower, &MacCtx.TxTimeOnAir );
MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
MacCtx.McpsConfirm.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
MacCtx.McpsConfirm.TxPower = txPower;
MacCtx.McpsConfirm.Channel = channel;
// Store the time on air
MacCtx.McpsConfirm.TxTimeOnAir = MacCtx.TxTimeOnAir;
MacCtx.MlmeConfirm.TxTimeOnAir = MacCtx.TxTimeOnAir;
if( LoRaMacClassBIsBeaconModeActive( ) == true )
{
// Currently, the Time-On-Air can only be computed when the radio is configured with
// the TX configuration
TimerTime_t collisionTime = LoRaMacClassBIsUplinkCollision( MacCtx.TxTimeOnAir );
if( collisionTime > 0 )
{
return LORAMAC_STATUS_BUSY_UPLINK_COLLISION;
}
}
if( MacCtx.NvmCtx->DeviceClass == CLASS_B )
{
// Stop slots for class b
LoRaMacClassBStopRxSlots( );
}
LoRaMacClassBHaltBeaconing( );
MacCtx.MacState |= LORAMAC_TX_RUNNING;
if( MacCtx.NodeAckRequested == false )
{
MacCtx.ChannelsNbTransCounter++;
}
// Send now
Radio.Send( MacCtx.PktBuffer, MacCtx.PktBufferLen );
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t SetTxContinuousWave( uint16_t timeout )
{
ContinuousWaveParams_t continuousWave;
continuousWave.Channel = MacCtx.Channel;
continuousWave.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
continuousWave.TxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
continuousWave.MaxEirp = MacCtx.NvmCtx->MacParams.MaxEirp;
continuousWave.AntennaGain = MacCtx.NvmCtx->MacParams.AntennaGain;
continuousWave.Timeout = timeout;
RegionSetContinuousWave( MacCtx.NvmCtx->Region, &continuousWave );
MacCtx.MacState |= LORAMAC_TX_RUNNING;
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t SetTxContinuousWave1( uint16_t timeout, uint32_t frequency, uint8_t power )
{
Radio.SetTxContinuousWave( frequency, power, timeout );
MacCtx.MacState |= LORAMAC_TX_RUNNING;
return LORAMAC_STATUS_OK;
}
LoRaMacCtxs_t* GetCtxs( void )
{
Contexts.MacNvmCtx = &NvmMacCtx;
Contexts.MacNvmCtxSize = sizeof( NvmMacCtx );
Contexts.CryptoNvmCtx = LoRaMacCryptoGetNvmCtx( &Contexts.CryptoNvmCtxSize );
GetNvmCtxParams_t params ={ 0 };
Contexts.RegionNvmCtx = RegionGetNvmCtx( MacCtx.NvmCtx->Region, ¶ms );
Contexts.RegionNvmCtxSize = params.nvmCtxSize;
Contexts.SecureElementNvmCtx = SecureElementGetNvmCtx( &Contexts.SecureElementNvmCtxSize );
Contexts.CommandsNvmCtx = LoRaMacCommandsGetNvmCtx( &Contexts.CommandsNvmCtxSize );
Contexts.ClassBNvmCtx = LoRaMacClassBGetNvmCtx( &Contexts.ClassBNvmCtxSize );
Contexts.ConfirmQueueNvmCtx = LoRaMacConfirmQueueGetNvmCtx( &Contexts.ConfirmQueueNvmCtxSize );
return &Contexts;
}
LoRaMacStatus_t RestoreCtxs( LoRaMacCtxs_t* contexts )
{
if( contexts == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( MacCtx.MacState != LORAMAC_STOPPED )
{
return LORAMAC_STATUS_BUSY;
}
if( contexts->MacNvmCtx != NULL )
{
memcpy1( ( uint8_t* ) &NvmMacCtx, ( uint8_t* ) contexts->MacNvmCtx, contexts->MacNvmCtxSize );
}
InitDefaultsParams_t params;
params.Type = INIT_TYPE_RESTORE_CTX;
params.NvmCtx = contexts->RegionNvmCtx;
RegionInitDefaults( MacCtx.NvmCtx->Region, ¶ms );
// Initialize RxC config parameters.
MacCtx.RxWindowCConfig.Channel = MacCtx.Channel;
MacCtx.RxWindowCConfig.Frequency = MacCtx.NvmCtx->MacParams.RxCChannel.Frequency;
MacCtx.RxWindowCConfig.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
MacCtx.RxWindowCConfig.RxContinuous = true;
MacCtx.RxWindowCConfig.RxSlot = RX_SLOT_WIN_CLASS_C;
if( SecureElementRestoreNvmCtx( contexts->SecureElementNvmCtx ) != SECURE_ELEMENT_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
if( LoRaMacCryptoRestoreNvmCtx( contexts->CryptoNvmCtx ) != LORAMAC_CRYPTO_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
if( LoRaMacCommandsRestoreNvmCtx( contexts->CommandsNvmCtx ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
if( LoRaMacClassBRestoreNvmCtx( contexts->ClassBNvmCtx ) != true )
{
return LORAMAC_STATUS_CLASS_B_ERROR;
}
if( LoRaMacConfirmQueueRestoreNvmCtx( contexts->ConfirmQueueNvmCtx ) != true )
{
return LORAMAC_STATUS_CONFIRM_QUEUE_ERROR;
}
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t DetermineFrameType( LoRaMacMessageData_t* macMsg, FType_t* fType )
{
if( ( macMsg == NULL ) || ( fType == NULL ) )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
/* The LoRaWAN specification allows several possible configurations how data up/down frames are built up.
* In sake of clearness the following naming is applied. Please keep in mind that this is
* implementation specific since there is no definition in the LoRaWAN specification included.
*
* X -> Field is available
* - -> Field is not available
*
* +-------+ +----------+------+-------+--------------+
* | FType | | FOptsLen | Fopt | FPort | FRMPayload |
* +-------+ +----------+------+-------+--------------+
* | A | | > 0 | X | > 0 | X |
* +-------+ +----------+------+-------+--------------+
* | B | | >= 0 | X/- | - | - |
* +-------+ +----------+------+-------+--------------+
* | C | | = 0 | - | = 0 | MAC commands |
* +-------+ +----------+------+-------+--------------+
* | D | | = 0 | - | > 0 | X |
* +-------+ +----------+------+-------+--------------+
*/
if( ( macMsg->FHDR.FCtrl.Bits.FOptsLen > 0 ) && ( macMsg->FPort > 0 ) )
{
*fType = FRAME_TYPE_A;
}
else if( macMsg->FRMPayloadSize == 0 )
{
*fType = FRAME_TYPE_B;
}
else if( ( macMsg->FHDR.FCtrl.Bits.FOptsLen == 0 ) && ( macMsg->FPort == 0 ) )
{
*fType = FRAME_TYPE_C;
}
else if( ( macMsg->FHDR.FCtrl.Bits.FOptsLen == 0 ) && ( macMsg->FPort > 0 ) )
{
*fType = FRAME_TYPE_D;
}
else
{
// Should never happen.
return LORAMAC_STATUS_ERROR;
}
return LORAMAC_STATUS_OK;
}
static bool CheckRetransUnconfirmedUplink( void )
{
// Unconfirmed uplink, when all retransmissions are done.
if( MacCtx.ChannelsNbTransCounter >=
MacCtx.NvmCtx->MacParams.ChannelsNbTrans )
{
return true;
}
else if( MacCtx.MacFlags.Bits.McpsInd == 1 )
{
// For Class A stop in each case
if( MacCtx.NvmCtx->DeviceClass == CLASS_A )
{
return true;
}
else
{// For Class B & C stop only if the frame was received in RX1 window
if( MacCtx.McpsIndication.RxSlot == RX_SLOT_WIN_1 )
{
return true;
}
}
}
return false;
}
static bool CheckRetransConfirmedUplink( void )
{
// Confirmed uplink, when all retransmissions ( tries to get a ack ) are done.
if( MacCtx.AckTimeoutRetriesCounter >=
MacCtx.AckTimeoutRetries )
{
return true;
}
else if( MacCtx.MacFlags.Bits.McpsInd == 1 )
{
if( MacCtx.McpsConfirm.AckReceived == true )
{
return true;
}
}
return false;
}
static bool StopRetransmission( void )
{
if( ( MacCtx.MacFlags.Bits.McpsInd == 0 ) ||
( ( MacCtx.McpsIndication.RxSlot != RX_SLOT_WIN_1 ) &&
( MacCtx.McpsIndication.RxSlot != RX_SLOT_WIN_2 ) ) )
{ // Maximum repetitions without downlink. Increase ADR Ack counter.
// Only process the case when the MAC did not receive a downlink.
if( MacCtx.NvmCtx->AdrCtrlOn == true )
{
MacCtx.NvmCtx->AdrAckCounter++;
}
}
MacCtx.ChannelsNbTransCounter = 0;
MacCtx.NodeAckRequested = false;
MacCtx.AckTimeoutRetry = false;
MacCtx.MacState &= ~LORAMAC_TX_RUNNING;
return true;
}
static void AckTimeoutRetriesProcess( void )
{
if( MacCtx.AckTimeoutRetriesCounter < MacCtx.AckTimeoutRetries )
{
MacCtx.AckTimeoutRetriesCounter++;
if( ( MacCtx.AckTimeoutRetriesCounter % 2 ) == 1 )
{
GetPhyParams_t getPhy;
PhyParam_t phyParam;
getPhy.Attribute = PHY_NEXT_LOWER_TX_DR;
getPhy.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
getPhy.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParams.ChannelsDatarate = phyParam.Value;
}
}
}
static void AckTimeoutRetriesFinalize( void )
{
if( MacCtx.McpsConfirm.AckReceived == false )
{
InitDefaultsParams_t params;
params.Type = INIT_TYPE_RESTORE_DEFAULT_CHANNELS;
params.NvmCtx = Contexts.RegionNvmCtx;
RegionInitDefaults( MacCtx.NvmCtx->Region, ¶ms );
MacCtx.NodeAckRequested = false;
MacCtx.McpsConfirm.AckReceived = false;
}
MacCtx.McpsConfirm.NbRetries = MacCtx.AckTimeoutRetriesCounter;
}
static void CallNvmCtxCallback( LoRaMacNvmCtxModule_t module )
{
if( ( MacCtx.MacCallbacks != NULL ) && ( MacCtx.MacCallbacks->NvmContextChange != NULL ) )
{
MacCtx.MacCallbacks->NvmContextChange( module );
}
}
static void EventMacNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_MAC );
}
static void EventRegionNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_REGION );
}
static void EventCryptoNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_CRYPTO );
}
static void EventSecureElementNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_SECURE_ELEMENT );
}
static void EventCommandsNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_COMMANDS );
}
static void EventClassBNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_CLASS_B );
}
static void EventConfirmQueueNvmCtxChanged( void )
{
CallNvmCtxCallback( LORAMAC_NVMCTXMODULE_CONFIRM_QUEUE );
}
static uint8_t IsRequestPending( void )
{
if( ( MacCtx.MacFlags.Bits.MlmeReq == 1 ) ||
( MacCtx.MacFlags.Bits.McpsReq == 1 ) )
{
return 1;
}
return 0;
}
LoRaMacStatus_t LoRaMacInitialization( LoRaMacPrimitives_t* primitives, LoRaMacCallback_t* callbacks, LoRaMacRegion_t region )
{
GetPhyParams_t getPhy;
PhyParam_t phyParam;
LoRaMacClassBCallback_t classBCallbacks;
LoRaMacClassBParams_t classBParams;
if( ( primitives == NULL ) ||
( callbacks == NULL ) )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( ( primitives->MacMcpsConfirm == NULL ) ||
( primitives->MacMcpsIndication == NULL ) ||
( primitives->MacMlmeConfirm == NULL ) ||
( primitives->MacMlmeIndication == NULL ) )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
// Verify if the region is supported
if( RegionIsActive( region ) == false )
{
return LORAMAC_STATUS_REGION_NOT_SUPPORTED;
}
// Confirm queue reset
LoRaMacConfirmQueueInit( primitives, EventConfirmQueueNvmCtxChanged );
// Initialize the module context with zeros
memset1( ( uint8_t* ) &NvmMacCtx, 0x00, sizeof( LoRaMacNvmCtx_t ) );
memset1( ( uint8_t* ) &MacCtx, 0x00, sizeof( LoRaMacCtx_t ) );
MacCtx.NvmCtx = &NvmMacCtx;
// Set non zero variables to its default value
MacCtx.AckTimeoutRetriesCounter = 1;
MacCtx.AckTimeoutRetries = 1;
MacCtx.NvmCtx->Region = region;
MacCtx.NvmCtx->DeviceClass = CLASS_A;
// Setup version
MacCtx.NvmCtx->Version.Value = LORAMAC_VERSION;
// Reset to defaults
getPhy.Attribute = PHY_DUTY_CYCLE;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->DutyCycleOn = ( bool ) phyParam.Value;
getPhy.Attribute = PHY_DEF_TX_POWER;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower = phyParam.Value;
getPhy.Attribute = PHY_DEF_TX_DR;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.ChannelsDatarate = phyParam.Value;
getPhy.Attribute = PHY_MAX_RX_WINDOW;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.MaxRxWindow = phyParam.Value;
getPhy.Attribute = PHY_RECEIVE_DELAY1;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.ReceiveDelay1 = phyParam.Value;
getPhy.Attribute = PHY_RECEIVE_DELAY2;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.ReceiveDelay2 = phyParam.Value;
getPhy.Attribute = PHY_JOIN_ACCEPT_DELAY1;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.JoinAcceptDelay1 = phyParam.Value;
getPhy.Attribute = PHY_JOIN_ACCEPT_DELAY2;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.JoinAcceptDelay2 = phyParam.Value;
getPhy.Attribute = PHY_DEF_DR1_OFFSET;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.Rx1DrOffset = phyParam.Value;
getPhy.Attribute = PHY_DEF_RX2_FREQUENCY;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.Rx2Channel.Frequency = phyParam.Value;
MacCtx.NvmCtx->MacParamsDefaults.RxCChannel.Frequency = phyParam.Value;
getPhy.Attribute = PHY_DEF_RX2_DR;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.Rx2Channel.Datarate = phyParam.Value;
MacCtx.NvmCtx->MacParamsDefaults.RxCChannel.Datarate = phyParam.Value;
getPhy.Attribute = PHY_DEF_UPLINK_DWELL_TIME;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.UplinkDwellTime = phyParam.Value;
getPhy.Attribute = PHY_DEF_DOWNLINK_DWELL_TIME;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.DownlinkDwellTime = phyParam.Value;
getPhy.Attribute = PHY_DEF_MAX_EIRP;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.MaxEirp = phyParam.fValue;
getPhy.Attribute = PHY_DEF_ANTENNA_GAIN;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.NvmCtx->MacParamsDefaults.AntennaGain = phyParam.fValue;
getPhy.Attribute = PHY_DEF_ADR_ACK_LIMIT;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.AdrAckLimit = phyParam.Value;
getPhy.Attribute = PHY_DEF_ADR_ACK_DELAY;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
MacCtx.AdrAckDelay = phyParam.Value;
// Init parameters which are not set in function ResetMacParameters
MacCtx.NvmCtx->MacParamsDefaults.ChannelsNbTrans = 1;
MacCtx.NvmCtx->MacParamsDefaults.SystemMaxRxError = 10;
MacCtx.NvmCtx->MacParamsDefaults.MinRxSymbols = 6;
MacCtx.NvmCtx->MacParams.SystemMaxRxError = MacCtx.NvmCtx->MacParamsDefaults.SystemMaxRxError;
MacCtx.NvmCtx->MacParams.MinRxSymbols = MacCtx.NvmCtx->MacParamsDefaults.MinRxSymbols;
MacCtx.NvmCtx->MacParams.MaxRxWindow = MacCtx.NvmCtx->MacParamsDefaults.MaxRxWindow;
MacCtx.NvmCtx->MacParams.ReceiveDelay1 = MacCtx.NvmCtx->MacParamsDefaults.ReceiveDelay1;
MacCtx.NvmCtx->MacParams.ReceiveDelay2 = MacCtx.NvmCtx->MacParamsDefaults.ReceiveDelay2;
MacCtx.NvmCtx->MacParams.JoinAcceptDelay1 = MacCtx.NvmCtx->MacParamsDefaults.JoinAcceptDelay1;
MacCtx.NvmCtx->MacParams.JoinAcceptDelay2 = MacCtx.NvmCtx->MacParamsDefaults.JoinAcceptDelay2;
MacCtx.NvmCtx->MacParams.ChannelsNbTrans = MacCtx.NvmCtx->MacParamsDefaults.ChannelsNbTrans;
InitDefaultsParams_t params;
params.Type = INIT_TYPE_BANDS;
params.NvmCtx = NULL;
RegionInitDefaults( MacCtx.NvmCtx->Region, ¶ms );
ResetMacParameters( );
MacCtx.NvmCtx->PublicNetwork = true;
MacCtx.MacPrimitives = primitives;
MacCtx.MacCallbacks = callbacks;
MacCtx.MacFlags.Value = 0;
MacCtx.MacState = LORAMAC_STOPPED;
// Reset duty cycle times
MacCtx.NvmCtx->LastTxDoneTime = 0;
MacCtx.NvmCtx->AggregatedTimeOff = 0;
// Initialize timers
TimerInit( &MacCtx.TxDelayedTimer, OnTxDelayedTimerEvent );
TimerInit( &MacCtx.RxWindowTimer1, OnRxWindow1TimerEvent );
TimerInit( &MacCtx.RxWindowTimer2, OnRxWindow2TimerEvent );
TimerInit( &MacCtx.AckTimeoutTimer, OnAckTimeoutTimerEvent );
// Store the current initialization time
MacCtx.NvmCtx->InitializationTime = SysTimeGetMcuTime( );
// Initialize Radio driver
MacCtx.RadioEvents.TxDone = OnRadioTxDone;
MacCtx.RadioEvents.RxDone = OnRadioRxDone;
MacCtx.RadioEvents.RxError = OnRadioRxError;
MacCtx.RadioEvents.TxTimeout = OnRadioTxTimeout;
MacCtx.RadioEvents.RxTimeout = OnRadioRxTimeout;
Radio.Init( &MacCtx.RadioEvents );
// Initialize the Secure Element driver
if( SecureElementInit( EventSecureElementNvmCtxChanged ) != SECURE_ELEMENT_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
// Initialize Crypto module
if( LoRaMacCryptoInit( EventCryptoNvmCtxChanged ) != LORAMAC_CRYPTO_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
// Initialize MAC commands module
if( LoRaMacCommandsInit( EventCommandsNvmCtxChanged ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
// Set multicast downlink counter reference
if( LoRaMacCryptoSetMulticastReference( MacCtx.NvmCtx->MulticastChannelList ) != LORAMAC_CRYPTO_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
// Random seed initialization
srand1( Radio.Random( ) );
Radio.SetPublicNetwork( MacCtx.NvmCtx->PublicNetwork );
Radio.Sleep( );
// Initialize class b
// Apply callback
classBCallbacks.GetTemperatureLevel = NULL;
classBCallbacks.MacProcessNotify = NULL;
if( callbacks != NULL )
{
classBCallbacks.GetTemperatureLevel = callbacks->GetTemperatureLevel;
classBCallbacks.MacProcessNotify = callbacks->MacProcessNotify;
}
// Must all be static. Don't use local references.
classBParams.MlmeIndication = &MacCtx.MlmeIndication;
classBParams.McpsIndication = &MacCtx.McpsIndication;
classBParams.MlmeConfirm = &MacCtx.MlmeConfirm;
classBParams.LoRaMacFlags = &MacCtx.MacFlags;
classBParams.LoRaMacDevAddr = &MacCtx.NvmCtx->DevAddr;
classBParams.LoRaMacRegion = &MacCtx.NvmCtx->Region;
classBParams.LoRaMacParams = &MacCtx.NvmCtx->MacParams;
classBParams.MulticastChannels = &MacCtx.NvmCtx->MulticastChannelList[0];
LoRaMacClassBInit( &classBParams, &classBCallbacks, &EventClassBNvmCtxChanged );
LoRaMacEnableRequests( LORAMAC_REQUEST_HANDLING_ON );
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t LoRaMacStart( void )
{
MacCtx.MacState = LORAMAC_IDLE;
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t LoRaMacStop( void )
{
if( LoRaMacIsBusy( ) == false )
{
MacCtx.MacState = LORAMAC_STOPPED;
return LORAMAC_STATUS_OK;
}
else if( MacCtx.MacState == LORAMAC_STOPPED )
{
return LORAMAC_STATUS_OK;
}
return LORAMAC_STATUS_BUSY;
}
LoRaMacStatus_t LoRaMacQueryNextTxDelay( int8_t datarate, TimerTime_t* time )
{
NextChanParams_t nextChan;
uint8_t channel = 0;
CalcNextAdrParams_t adrNext;
uint32_t adrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
int8_t txPower = MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower;
if( time == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( MacCtx.NvmCtx->LastTxDoneTime == 0 )
{
*time = 0;
return LORAMAC_STATUS_OK;
}
// Update back-off
CalculateBackOff( MacCtx.NvmCtx->LastTxChannel );
nextChan.AggrTimeOff = MacCtx.NvmCtx->AggregatedTimeOff;
nextChan.Datarate = datarate;
nextChan.DutyCycleEnabled = MacCtx.NvmCtx->DutyCycleOn;
nextChan.QueryNextTxDelayOnly = true;
nextChan.Joined = true;
nextChan.LastAggrTx = MacCtx.NvmCtx->LastTxDoneTime;
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
nextChan.Joined = false;
}
if( MacCtx.NvmCtx->AdrCtrlOn == true )
{
// Setup ADR request
adrNext.UpdateChanMask = false;
adrNext.AdrEnabled = MacCtx.NvmCtx->AdrCtrlOn;
adrNext.AdrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
adrNext.AdrAckLimit = MacCtx.AdrAckLimit;
adrNext.AdrAckDelay = MacCtx.AdrAckDelay;
adrNext.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
adrNext.TxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
adrNext.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
adrNext.Region = MacCtx.NvmCtx->Region;
// We call the function for information purposes only. We don't want to
// apply the datarate, the tx power and the ADR ack counter.
LoRaMacAdrCalcNext( &adrNext, &nextChan.Datarate, &txPower, &adrAckCounter );
}
// Select channel
return RegionNextChannel( MacCtx.NvmCtx->Region, &nextChan, &channel, time, &MacCtx.NvmCtx->AggregatedTimeOff );
}
LoRaMacStatus_t LoRaMacQueryTxPossible( uint8_t size, LoRaMacTxInfo_t* txInfo )
{
CalcNextAdrParams_t adrNext;
uint32_t adrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
int8_t datarate = MacCtx.NvmCtx->MacParamsDefaults.ChannelsDatarate;
int8_t txPower = MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower;
size_t macCmdsSize = 0;
if( txInfo == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
// Setup ADR request
adrNext.Version = MacCtx.NvmCtx->Version;
adrNext.UpdateChanMask = false;
adrNext.AdrEnabled = MacCtx.NvmCtx->AdrCtrlOn;
adrNext.AdrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
adrNext.AdrAckLimit = MacCtx.AdrAckLimit;
adrNext.AdrAckDelay = MacCtx.AdrAckDelay;
adrNext.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
adrNext.TxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
adrNext.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
adrNext.Region = MacCtx.NvmCtx->Region;
// We call the function for information purposes only. We don't want to
// apply the datarate, the tx power and the ADR ack counter.
LoRaMacAdrCalcNext( &adrNext, &datarate, &txPower, &adrAckCounter );
txInfo->CurrentPossiblePayloadSize = GetMaxAppPayloadWithoutFOptsLength( datarate );
if( LoRaMacCommandsGetSizeSerializedCmds( &macCmdsSize ) != LORAMAC_COMMANDS_SUCCESS )
{
return LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
// Verify if the MAC commands fit into the FOpts and into the maximum payload.
if( ( LORA_MAC_COMMAND_MAX_FOPTS_LENGTH >= macCmdsSize ) && ( txInfo->CurrentPossiblePayloadSize >= macCmdsSize ) )
{
txInfo->MaxPossibleApplicationDataSize = txInfo->CurrentPossiblePayloadSize - macCmdsSize;
// Verify if the application data together with MAC command fit into the maximum payload.
if( txInfo->CurrentPossiblePayloadSize >= ( macCmdsSize + size ) )
{
return LORAMAC_STATUS_OK;
}
else
{
return LORAMAC_STATUS_LENGTH_ERROR;
}
}
else
{
txInfo->MaxPossibleApplicationDataSize = 0;
return LORAMAC_STATUS_LENGTH_ERROR;
}
}
LoRaMacStatus_t LoRaMacMibGetRequestConfirm( MibRequestConfirm_t* mibGet )
{
LoRaMacStatus_t status = LORAMAC_STATUS_OK;
GetPhyParams_t getPhy;
PhyParam_t phyParam;
if( mibGet == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
switch( mibGet->Type )
{
case MIB_DEVICE_CLASS:
{
mibGet->Param.Class = MacCtx.NvmCtx->DeviceClass;
break;
}
case MIB_NETWORK_ACTIVATION:
{
mibGet->Param.NetworkActivation = MacCtx.NvmCtx->NetworkActivation;
break;
}
case MIB_DEV_EUI:
{
mibGet->Param.DevEui = SecureElementGetDevEui( );
break;
}
case MIB_JOIN_EUI:
{
mibGet->Param.JoinEui = SecureElementGetJoinEui( );
break;
}
case MIB_SE_PIN:
{
mibGet->Param.JoinEui = SecureElementGetPin( );
break;
}
case MIB_ADR:
{
mibGet->Param.AdrEnable = MacCtx.NvmCtx->AdrCtrlOn;
break;
}
case MIB_NET_ID:
{
mibGet->Param.NetID = MacCtx.NvmCtx->NetID;
break;
}
case MIB_DEV_ADDR:
{
mibGet->Param.DevAddr = MacCtx.NvmCtx->DevAddr;
break;
}
case MIB_PUBLIC_NETWORK:
{
mibGet->Param.EnablePublicNetwork = MacCtx.NvmCtx->PublicNetwork;
break;
}
case MIB_CHANNELS:
{
getPhy.Attribute = PHY_CHANNELS;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
mibGet->Param.ChannelList = phyParam.Channels;
break;
}
case MIB_RX2_CHANNEL:
{
mibGet->Param.Rx2Channel = MacCtx.NvmCtx->MacParams.Rx2Channel;
break;
}
case MIB_RX2_DEFAULT_CHANNEL:
{
mibGet->Param.Rx2Channel = MacCtx.NvmCtx->MacParamsDefaults.Rx2Channel;
break;
}
case MIB_RXC_CHANNEL:
{
mibGet->Param.RxCChannel = MacCtx.NvmCtx->MacParams.RxCChannel;
break;
}
case MIB_RXC_DEFAULT_CHANNEL:
{
mibGet->Param.RxCChannel = MacCtx.NvmCtx->MacParamsDefaults.RxCChannel;
break;
}
case MIB_CHANNELS_DEFAULT_MASK:
{
getPhy.Attribute = PHY_CHANNELS_DEFAULT_MASK;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
mibGet->Param.ChannelsDefaultMask = phyParam.ChannelsMask;
break;
}
case MIB_CHANNELS_MASK:
{
getPhy.Attribute = PHY_CHANNELS_MASK;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
mibGet->Param.ChannelsMask = phyParam.ChannelsMask;
break;
}
case MIB_CHANNELS_NB_TRANS:
{
mibGet->Param.ChannelsNbTrans = MacCtx.NvmCtx->MacParams.ChannelsNbTrans;
break;
}
case MIB_MAX_RX_WINDOW_DURATION:
{
mibGet->Param.MaxRxWindow = MacCtx.NvmCtx->MacParams.MaxRxWindow;
break;
}
case MIB_RECEIVE_DELAY_1:
{
mibGet->Param.ReceiveDelay1 = MacCtx.NvmCtx->MacParams.ReceiveDelay1;
break;
}
case MIB_RECEIVE_DELAY_2:
{
mibGet->Param.ReceiveDelay2 = MacCtx.NvmCtx->MacParams.ReceiveDelay2;
break;
}
case MIB_JOIN_ACCEPT_DELAY_1:
{
mibGet->Param.JoinAcceptDelay1 = MacCtx.NvmCtx->MacParams.JoinAcceptDelay1;
break;
}
case MIB_JOIN_ACCEPT_DELAY_2:
{
mibGet->Param.JoinAcceptDelay2 = MacCtx.NvmCtx->MacParams.JoinAcceptDelay2;
break;
}
case MIB_CHANNELS_DEFAULT_DATARATE:
{
mibGet->Param.ChannelsDefaultDatarate = MacCtx.NvmCtx->MacParamsDefaults.ChannelsDatarate;
break;
}
case MIB_CHANNELS_DATARATE:
{
mibGet->Param.ChannelsDatarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
break;
}
case MIB_CHANNELS_DEFAULT_TX_POWER:
{
mibGet->Param.ChannelsDefaultTxPower = MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower;
break;
}
case MIB_CHANNELS_TX_POWER:
{
mibGet->Param.ChannelsTxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
break;
}
case MIB_SYSTEM_MAX_RX_ERROR:
{
mibGet->Param.SystemMaxRxError = MacCtx.NvmCtx->MacParams.SystemMaxRxError;
break;
}
case MIB_MIN_RX_SYMBOLS:
{
mibGet->Param.MinRxSymbols = MacCtx.NvmCtx->MacParams.MinRxSymbols;
break;
}
case MIB_ANTENNA_GAIN:
{
mibGet->Param.AntennaGain = MacCtx.NvmCtx->MacParams.AntennaGain;
break;
}
case MIB_NVM_CTXS:
{
mibGet->Param.Contexts = GetCtxs( );
break;
}
case MIB_DEFAULT_ANTENNA_GAIN:
{
mibGet->Param.DefaultAntennaGain = MacCtx.NvmCtx->MacParamsDefaults.AntennaGain;
break;
}
case MIB_LORAWAN_VERSION:
{
mibGet->Param.LrWanVersion.LoRaWan = MacCtx.NvmCtx->Version;
mibGet->Param.LrWanVersion.LoRaWanRegion = RegionGetVersion( );
break;
}
default:
{
status = LoRaMacClassBMibGetRequestConfirm( mibGet );
break;
}
}
return status;
}
LoRaMacStatus_t LoRaMacMibSetRequestConfirm( MibRequestConfirm_t* mibSet )
{
LoRaMacStatus_t status = LORAMAC_STATUS_OK;
ChanMaskSetParams_t chanMaskSet;
VerifyParams_t verify;
if( mibSet == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( ( MacCtx.MacState & LORAMAC_TX_RUNNING ) == LORAMAC_TX_RUNNING )
{
return LORAMAC_STATUS_BUSY;
}
switch( mibSet->Type )
{
case MIB_DEVICE_CLASS:
{
status = SwitchClass( mibSet->Param.Class );
break;
}
case MIB_NETWORK_ACTIVATION:
{
if( mibSet->Param.NetworkActivation != ACTIVATION_TYPE_OTAA )
{
MacCtx.NvmCtx->NetworkActivation = mibSet->Param.NetworkActivation;
}
else
{ // Do not allow to set ACTIVATION_TYPE_OTAA since the MAC will set it automatically after a successful join process.
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_DEV_EUI:
{
if( SecureElementSetDevEui( mibSet->Param.DevEui ) != SECURE_ELEMENT_SUCCESS )
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_JOIN_EUI:
{
if( SecureElementSetJoinEui( mibSet->Param.JoinEui ) != SECURE_ELEMENT_SUCCESS )
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_SE_PIN:
{
if( SecureElementSetPin( mibSet->Param.SePin ) != SECURE_ELEMENT_SUCCESS )
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_ADR:
{
MacCtx.NvmCtx->AdrCtrlOn = mibSet->Param.AdrEnable;
break;
}
case MIB_NET_ID:
{
MacCtx.NvmCtx->NetID = mibSet->Param.NetID;
break;
}
case MIB_DEV_ADDR:
{
MacCtx.NvmCtx->DevAddr = mibSet->Param.DevAddr;
break;
}
case MIB_APP_KEY:
{
if( mibSet->Param.AppKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( APP_KEY, mibSet->Param.AppKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_NWK_KEY:
{
if( mibSet->Param.NwkKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( NWK_KEY, mibSet->Param.NwkKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_J_S_INT_KEY:
{
if( mibSet->Param.JSIntKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( J_S_INT_KEY, mibSet->Param.JSIntKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_J_S_ENC_KEY:
{
if( mibSet->Param.JSEncKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( J_S_ENC_KEY, mibSet->Param.JSEncKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_F_NWK_S_INT_KEY:
{
if( mibSet->Param.FNwkSIntKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( F_NWK_S_INT_KEY, mibSet->Param.FNwkSIntKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_S_NWK_S_INT_KEY:
{
if( mibSet->Param.SNwkSIntKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( S_NWK_S_INT_KEY, mibSet->Param.SNwkSIntKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_NWK_S_ENC_KEY:
{
if( mibSet->Param.NwkSEncKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( NWK_S_ENC_KEY, mibSet->Param.NwkSEncKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_APP_S_KEY:
{
if( mibSet->Param.AppSKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( APP_S_KEY, mibSet->Param.AppSKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_KE_KEY:
{
if( mibSet->Param.McKEKey != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_KE_KEY, mibSet->Param.McKEKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_KEY_0:
{
if( mibSet->Param.McKey0 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_KEY_0, mibSet->Param.McKey0 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_APP_S_KEY_0:
{
if( mibSet->Param.McAppSKey0 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_APP_S_KEY_0, mibSet->Param.McAppSKey0 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_NWK_S_KEY_0:
{
if( mibSet->Param.McNwkSKey0 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_NWK_S_KEY_0, mibSet->Param.McNwkSKey0 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_KEY_1:
{
if( mibSet->Param.McKey1 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_KEY_1, mibSet->Param.McKey1 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_APP_S_KEY_1:
{
if( mibSet->Param.McAppSKey1 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_APP_S_KEY_1, mibSet->Param.McAppSKey1 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_NWK_S_KEY_1:
{
if( mibSet->Param.McNwkSKey1 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_NWK_S_KEY_1, mibSet->Param.McNwkSKey1 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_KEY_2:
{
if( mibSet->Param.McKey2 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_KEY_2, mibSet->Param.McKey2 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_APP_S_KEY_2:
{
if( mibSet->Param.McAppSKey2 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_APP_S_KEY_2, mibSet->Param.McAppSKey2 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_NWK_S_KEY_2:
{
if( mibSet->Param.McNwkSKey2 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_NWK_S_KEY_2, mibSet->Param.McNwkSKey2 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_KEY_3:
{
if( mibSet->Param.McKey3 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_KEY_3, mibSet->Param.McKey3 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_APP_S_KEY_3:
{
if( mibSet->Param.McAppSKey3 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_APP_S_KEY_3, mibSet->Param.McAppSKey3 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MC_NWK_S_KEY_3:
{
if( mibSet->Param.McNwkSKey3 != NULL )
{
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( MC_NWK_S_KEY_3, mibSet->Param.McNwkSKey3 ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_PUBLIC_NETWORK:
{
MacCtx.NvmCtx->PublicNetwork = mibSet->Param.EnablePublicNetwork;
Radio.SetPublicNetwork( MacCtx.NvmCtx->PublicNetwork );
break;
}
case MIB_RX2_CHANNEL:
{
verify.DatarateParams.Datarate = mibSet->Param.Rx2Channel.Datarate;
verify.DatarateParams.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_RX_DR ) == true )
{
MacCtx.NvmCtx->MacParams.Rx2Channel = mibSet->Param.Rx2Channel;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_RX2_DEFAULT_CHANNEL:
{
verify.DatarateParams.Datarate = mibSet->Param.Rx2Channel.Datarate;
verify.DatarateParams.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_RX_DR ) == true )
{
MacCtx.NvmCtx->MacParamsDefaults.Rx2Channel = mibSet->Param.Rx2DefaultChannel;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_RXC_CHANNEL:
{
verify.DatarateParams.Datarate = mibSet->Param.RxCChannel.Datarate;
verify.DatarateParams.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_RX_DR ) == true )
{
MacCtx.NvmCtx->MacParams.RxCChannel = mibSet->Param.RxCChannel;
if( ( MacCtx.NvmCtx->DeviceClass == CLASS_C ) && ( MacCtx.NvmCtx->NetworkActivation != ACTIVATION_TYPE_NONE ) )
{
// We can only compute the RX window parameters directly, if we are already
// in class c mode and joined. We cannot setup an RX window in case of any other
// class type.
// Set the radio into sleep mode in case we are still in RX mode
Radio.Sleep( );
OpenContinuousRxCWindow( );
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_RXC_DEFAULT_CHANNEL:
{
verify.DatarateParams.Datarate = mibSet->Param.RxCChannel.Datarate;
verify.DatarateParams.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_RX_DR ) == true )
{
MacCtx.NvmCtx->MacParamsDefaults.RxCChannel = mibSet->Param.RxCDefaultChannel;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_CHANNELS_DEFAULT_MASK:
{
chanMaskSet.ChannelsMaskIn = mibSet->Param.ChannelsDefaultMask;
chanMaskSet.ChannelsMaskType = CHANNELS_DEFAULT_MASK;
if( RegionChanMaskSet( MacCtx.NvmCtx->Region, &chanMaskSet ) == false )
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_CHANNELS_MASK:
{
chanMaskSet.ChannelsMaskIn = mibSet->Param.ChannelsMask;
chanMaskSet.ChannelsMaskType = CHANNELS_MASK;
if( RegionChanMaskSet( MacCtx.NvmCtx->Region, &chanMaskSet ) == false )
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_CHANNELS_NB_TRANS:
{
if( ( mibSet->Param.ChannelsNbTrans >= 1 ) &&
( mibSet->Param.ChannelsNbTrans <= 15 ) )
{
MacCtx.NvmCtx->MacParams.ChannelsNbTrans = mibSet->Param.ChannelsNbTrans;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_MAX_RX_WINDOW_DURATION:
{
MacCtx.NvmCtx->MacParams.MaxRxWindow = mibSet->Param.MaxRxWindow;
break;
}
case MIB_RECEIVE_DELAY_1:
{
MacCtx.NvmCtx->MacParams.ReceiveDelay1 = mibSet->Param.ReceiveDelay1;
break;
}
case MIB_RECEIVE_DELAY_2:
{
MacCtx.NvmCtx->MacParams.ReceiveDelay2 = mibSet->Param.ReceiveDelay2;
break;
}
case MIB_JOIN_ACCEPT_DELAY_1:
{
MacCtx.NvmCtx->MacParams.JoinAcceptDelay1 = mibSet->Param.JoinAcceptDelay1;
break;
}
case MIB_JOIN_ACCEPT_DELAY_2:
{
MacCtx.NvmCtx->MacParams.JoinAcceptDelay2 = mibSet->Param.JoinAcceptDelay2;
break;
}
case MIB_CHANNELS_DEFAULT_DATARATE:
{
verify.DatarateParams.Datarate = mibSet->Param.ChannelsDefaultDatarate;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_DEF_TX_DR ) == true )
{
MacCtx.NvmCtx->MacParamsDefaults.ChannelsDatarate = verify.DatarateParams.Datarate;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_CHANNELS_DATARATE:
{
verify.DatarateParams.Datarate = mibSet->Param.ChannelsDatarate;
verify.DatarateParams.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_TX_DR ) == true )
{
MacCtx.NvmCtx->MacParams.ChannelsDatarate = verify.DatarateParams.Datarate;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_CHANNELS_DEFAULT_TX_POWER:
{
verify.TxPower = mibSet->Param.ChannelsDefaultTxPower;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_DEF_TX_POWER ) == true )
{
MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower = verify.TxPower;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_CHANNELS_TX_POWER:
{
verify.TxPower = mibSet->Param.ChannelsTxPower;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_TX_POWER ) == true )
{
MacCtx.NvmCtx->MacParams.ChannelsTxPower = verify.TxPower;
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_SYSTEM_MAX_RX_ERROR:
{
MacCtx.NvmCtx->MacParams.SystemMaxRxError = MacCtx.NvmCtx->MacParamsDefaults.SystemMaxRxError = mibSet->Param.SystemMaxRxError;
break;
}
case MIB_MIN_RX_SYMBOLS:
{
MacCtx.NvmCtx->MacParams.MinRxSymbols = MacCtx.NvmCtx->MacParamsDefaults.MinRxSymbols = mibSet->Param.MinRxSymbols;
break;
}
case MIB_ANTENNA_GAIN:
{
MacCtx.NvmCtx->MacParams.AntennaGain = mibSet->Param.AntennaGain;
break;
}
case MIB_DEFAULT_ANTENNA_GAIN:
{
MacCtx.NvmCtx->MacParamsDefaults.AntennaGain = mibSet->Param.DefaultAntennaGain;
break;
}
case MIB_NVM_CTXS:
{
if( mibSet->Param.Contexts != 0 )
{
status = RestoreCtxs( mibSet->Param.Contexts );
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
case MIB_ABP_LORAWAN_VERSION:
{
if( mibSet->Param.AbpLrWanVersion.Fields.Minor <= 1 )
{
MacCtx.NvmCtx->Version = mibSet->Param.AbpLrWanVersion;
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetLrWanVersion( mibSet->Param.AbpLrWanVersion ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
status = LORAMAC_STATUS_PARAMETER_INVALID;
}
break;
}
default:
{
status = LoRaMacMibClassBSetRequestConfirm( mibSet );
break;
}
}
EventRegionNvmCtxChanged( );
EventMacNvmCtxChanged( );
return status;
}
LoRaMacStatus_t LoRaMacChannelAdd( uint8_t id, ChannelParams_t params )
{
ChannelAddParams_t channelAdd;
// Validate if the MAC is in a correct state
if( ( MacCtx.MacState & LORAMAC_TX_RUNNING ) == LORAMAC_TX_RUNNING )
{
if( ( MacCtx.MacState & LORAMAC_TX_CONFIG ) != LORAMAC_TX_CONFIG )
{
return LORAMAC_STATUS_BUSY;
}
}
channelAdd.NewChannel = ¶ms;
channelAdd.ChannelId = id;
EventRegionNvmCtxChanged( );
return RegionChannelAdd( MacCtx.NvmCtx->Region, &channelAdd );
}
LoRaMacStatus_t LoRaMacChannelRemove( uint8_t id )
{
ChannelRemoveParams_t channelRemove;
if( ( MacCtx.MacState & LORAMAC_TX_RUNNING ) == LORAMAC_TX_RUNNING )
{
if( ( MacCtx.MacState & LORAMAC_TX_CONFIG ) != LORAMAC_TX_CONFIG )
{
return LORAMAC_STATUS_BUSY;
}
}
channelRemove.ChannelId = id;
if( RegionChannelsRemove( MacCtx.NvmCtx->Region, &channelRemove ) == false )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
EventRegionNvmCtxChanged( );
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t LoRaMacMcChannelSetup( McChannelParams_t *channel )
{
if( ( MacCtx.MacState & LORAMAC_TX_RUNNING ) == LORAMAC_TX_RUNNING )
{
return LORAMAC_STATUS_BUSY;
}
if( channel->GroupID >= LORAMAC_MAX_MC_CTX )
{
return LORAMAC_STATUS_MC_GROUP_UNDEFINED;
}
MacCtx.NvmCtx->MulticastChannelList[channel->GroupID].ChannelParams = *channel;
if( channel->IsRemotelySetup == true )
{
const KeyIdentifier_t mcKeys[LORAMAC_MAX_MC_CTX] = { MC_KEY_0, MC_KEY_1, MC_KEY_2, MC_KEY_3 };
if( LoRaMacCryptoSetKey( mcKeys[channel->GroupID], channel->McKeys.McKeyE ) != LORAMAC_CRYPTO_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
if( LoRaMacCryptoDeriveMcSessionKeyPair( channel->GroupID, channel->Address ) != LORAMAC_CRYPTO_SUCCESS )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
else
{
const KeyIdentifier_t mcAppSKeys[LORAMAC_MAX_MC_CTX] = { MC_APP_S_KEY_0, MC_APP_S_KEY_1, MC_APP_S_KEY_2, MC_APP_S_KEY_3 };
const KeyIdentifier_t mcNwkSKeys[LORAMAC_MAX_MC_CTX] = { MC_NWK_S_KEY_0, MC_NWK_S_KEY_1, MC_NWK_S_KEY_2, MC_NWK_S_KEY_3 };
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( mcAppSKeys[channel->GroupID], channel->McKeys.Session.McAppSKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
if( LORAMAC_CRYPTO_SUCCESS != LoRaMacCryptoSetKey( mcNwkSKeys[channel->GroupID], channel->McKeys.Session.McNwkSKey ) )
{
return LORAMAC_STATUS_CRYPTO_ERROR;
}
}
if( channel->Class == CLASS_B )
{
// Calculate class b parameters
LoRaMacClassBSetMulticastPeriodicity( &MacCtx.NvmCtx->MulticastChannelList[channel->GroupID] );
}
// Reset multicast channel downlink counter to initial value.
*MacCtx.NvmCtx->MulticastChannelList[channel->GroupID].DownLinkCounter = FCNT_DOWN_INITAL_VALUE;
EventMacNvmCtxChanged( );
EventRegionNvmCtxChanged( );
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t LoRaMacMcChannelDelete( AddressIdentifier_t groupID )
{
if( ( MacCtx.MacState & LORAMAC_TX_RUNNING ) == LORAMAC_TX_RUNNING )
{
return LORAMAC_STATUS_BUSY;
}
if( ( groupID >= LORAMAC_MAX_MC_CTX ) ||
( MacCtx.NvmCtx->MulticastChannelList[groupID].ChannelParams.IsEnabled == false ) )
{
return LORAMAC_STATUS_MC_GROUP_UNDEFINED;
}
McChannelParams_t channel;
// Set all channel fields with 0
memset1( ( uint8_t* )&channel, 0, sizeof( McChannelParams_t ) );
MacCtx.NvmCtx->MulticastChannelList[groupID].ChannelParams = channel;
EventMacNvmCtxChanged( );
EventRegionNvmCtxChanged( );
return LORAMAC_STATUS_OK;
}
uint8_t LoRaMacMcChannelGetGroupId( uint32_t mcAddress )
{
for( uint8_t i = 0; i < LORAMAC_MAX_MC_CTX; i++ )
{
if( mcAddress == MacCtx.NvmCtx->MulticastChannelList[i].ChannelParams.Address )
{
return i;
}
}
return 0xFF;
}
LoRaMacStatus_t LoRaMacMcChannelSetupRxParams( AddressIdentifier_t groupID, McRxParams_t *rxParams, uint8_t *status )
{
*status = 0x1C + ( groupID & 0x03 );
if( ( MacCtx.MacState & LORAMAC_TX_RUNNING ) == LORAMAC_TX_RUNNING )
{
return LORAMAC_STATUS_BUSY;
}
DeviceClass_t devClass = MacCtx.NvmCtx->MulticastChannelList[groupID].ChannelParams.Class;
if( ( devClass == CLASS_A ) || ( devClass > CLASS_C ) )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( ( groupID >= LORAMAC_MAX_MC_CTX ) ||
( MacCtx.NvmCtx->MulticastChannelList[groupID].ChannelParams.IsEnabled == false ) )
{
return LORAMAC_STATUS_MC_GROUP_UNDEFINED;
}
*status &= 0x0F; // groupID OK
VerifyParams_t verify;
// Check datarate
if( devClass == CLASS_B )
{
verify.DatarateParams.Datarate = rxParams->ClassB.Datarate;
}
else
{
verify.DatarateParams.Datarate = rxParams->ClassC.Datarate;
}
verify.DatarateParams.DownlinkDwellTime = MacCtx.NvmCtx->MacParams.DownlinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_RX_DR ) == true )
{
*status &= 0xFB; // datarate OK
}
// Check frequency
if( devClass == CLASS_B )
{
verify.Frequency = rxParams->ClassB.Frequency;
}
else
{
verify.Frequency = rxParams->ClassC.Frequency;
}
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_FREQUENCY ) == true )
{
*status &= 0xF7; // frequency OK
}
if( *status == ( groupID & 0x03 ) )
{
// Apply parameters
MacCtx.NvmCtx->MulticastChannelList[groupID].ChannelParams.RxParams = *rxParams;
}
EventMacNvmCtxChanged( );
EventRegionNvmCtxChanged( );
return LORAMAC_STATUS_OK;
}
LoRaMacStatus_t LoRaMacMlmeRequest( MlmeReq_t* mlmeRequest )
{
LoRaMacStatus_t status = LORAMAC_STATUS_SERVICE_UNKNOWN;
MlmeConfirmQueue_t queueElement;
uint8_t macCmdPayload[2] = { 0x00, 0x00 };
if( mlmeRequest == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( LoRaMacIsBusy( ) == true )
{
return LORAMAC_STATUS_BUSY;
}
if( LoRaMacConfirmQueueIsFull( ) == true )
{
return LORAMAC_STATUS_BUSY;
}
if( LoRaMacConfirmQueueGetCnt( ) == 0 )
{
memset1( ( uint8_t* ) &MacCtx.MlmeConfirm, 0, sizeof( MacCtx.MlmeConfirm ) );
}
MacCtx.MlmeConfirm.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
MacCtx.MacFlags.Bits.MlmeReq = 1;
queueElement.Request = mlmeRequest->Type;
queueElement.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
queueElement.RestrictCommonReadyToHandle = false;
switch( mlmeRequest->Type )
{
case MLME_JOIN:
{
if( ( MacCtx.MacState & LORAMAC_TX_DELAYED ) == LORAMAC_TX_DELAYED )
{
return LORAMAC_STATUS_BUSY;
}
ResetMacParameters( );
MacCtx.NvmCtx->MacParams.ChannelsDatarate = RegionAlternateDr( MacCtx.NvmCtx->Region, mlmeRequest->Req.Join.Datarate, ALTERNATE_DR );
queueElement.Status = LORAMAC_EVENT_INFO_STATUS_JOIN_FAIL;
status = SendReJoinReq( JOIN_REQ );
if( status != LORAMAC_STATUS_OK )
{
// Revert back the previous datarate ( mainly used for US915 like regions )
MacCtx.NvmCtx->MacParams.ChannelsDatarate = RegionAlternateDr( MacCtx.NvmCtx->Region, mlmeRequest->Req.Join.Datarate, ALTERNATE_DR_RESTORE );
}
break;
}
case MLME_LINK_CHECK:
{
// LoRaMac will send this command piggy-pack
status = LORAMAC_STATUS_OK;
if( LoRaMacCommandsAddCmd( MOTE_MAC_LINK_CHECK_REQ, macCmdPayload, 0 ) != LORAMAC_COMMANDS_SUCCESS )
{
status = LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
break;
}
case MLME_TXCW:
{
status = SetTxContinuousWave( mlmeRequest->Req.TxCw.Timeout );
break;
}
case MLME_TXCW_1:
{
status = SetTxContinuousWave1( mlmeRequest->Req.TxCw.Timeout, mlmeRequest->Req.TxCw.Frequency, mlmeRequest->Req.TxCw.Power );
break;
}
case MLME_DEVICE_TIME:
{
// LoRaMac will send this command piggy-pack
status = LORAMAC_STATUS_OK;
if( LoRaMacCommandsAddCmd( MOTE_MAC_DEVICE_TIME_REQ, macCmdPayload, 0 ) != LORAMAC_COMMANDS_SUCCESS )
{
status = LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
break;
}
case MLME_PING_SLOT_INFO:
{
if( MacCtx.NvmCtx->DeviceClass == CLASS_A )
{
uint8_t value = mlmeRequest->Req.PingSlotInfo.PingSlot.Value;
// LoRaMac will send this command piggy-pack
LoRaMacClassBSetPingSlotInfo( mlmeRequest->Req.PingSlotInfo.PingSlot.Fields.Periodicity );
macCmdPayload[0] = value;
status = LORAMAC_STATUS_OK;
if( LoRaMacCommandsAddCmd( MOTE_MAC_PING_SLOT_INFO_REQ, macCmdPayload, 1 ) != LORAMAC_COMMANDS_SUCCESS )
{
status = LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
}
break;
}
case MLME_BEACON_TIMING:
{
// LoRaMac will send this command piggy-pack
status = LORAMAC_STATUS_OK;
if( LoRaMacCommandsAddCmd( MOTE_MAC_BEACON_TIMING_REQ, macCmdPayload, 0 ) != LORAMAC_COMMANDS_SUCCESS )
{
status = LORAMAC_STATUS_MAC_COMMAD_ERROR;
}
break;
}
case MLME_BEACON_ACQUISITION:
{
// Apply the request
queueElement.RestrictCommonReadyToHandle = true;
if( LoRaMacClassBIsAcquisitionInProgress( ) == false )
{
// Start class B algorithm
LoRaMacClassBSetBeaconState( BEACON_STATE_ACQUISITION );
LoRaMacClassBBeaconTimerEvent( NULL );
status = LORAMAC_STATUS_OK;
}
else
{
status = LORAMAC_STATUS_BUSY;
}
break;
}
default:
break;
}
if( status != LORAMAC_STATUS_OK )
{
if( LoRaMacConfirmQueueGetCnt( ) == 0 )
{
MacCtx.NodeAckRequested = false;
MacCtx.MacFlags.Bits.MlmeReq = 0;
}
}
else
{
LoRaMacConfirmQueueAdd( &queueElement );
EventMacNvmCtxChanged( );
}
return status;
}
LoRaMacStatus_t LoRaMacMcpsRequest( McpsReq_t* mcpsRequest )
{
GetPhyParams_t getPhy;
PhyParam_t phyParam;
LoRaMacStatus_t status = LORAMAC_STATUS_SERVICE_UNKNOWN;
LoRaMacHeader_t macHdr;
VerifyParams_t verify;
uint8_t fPort = 0;
void* fBuffer;
uint16_t fBufferSize;
int8_t datarate = DR_0;
bool readyToSend = false;
if( mcpsRequest == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( LoRaMacIsBusy( ) == true )
{
return LORAMAC_STATUS_BUSY;
}
macHdr.Value = 0;
memset1( ( uint8_t* ) &MacCtx.McpsConfirm, 0, sizeof( MacCtx.McpsConfirm ) );
MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_ERROR;
// AckTimeoutRetriesCounter must be reset every time a new request (unconfirmed or confirmed) is performed.
MacCtx.AckTimeoutRetriesCounter = 1;
switch( mcpsRequest->Type )
{
case MCPS_UNCONFIRMED:
{
readyToSend = true;
MacCtx.AckTimeoutRetries = 1;
macHdr.Bits.MType = FRAME_TYPE_DATA_UNCONFIRMED_UP;
fPort = mcpsRequest->Req.Unconfirmed.fPort;
fBuffer = mcpsRequest->Req.Unconfirmed.fBuffer;
fBufferSize = mcpsRequest->Req.Unconfirmed.fBufferSize;
datarate = mcpsRequest->Req.Unconfirmed.Datarate;
break;
}
case MCPS_CONFIRMED:
{
readyToSend = true;
MacCtx.AckTimeoutRetries = MIN( mcpsRequest->Req.Confirmed.NbTrials, MAX_ACK_RETRIES );
macHdr.Bits.MType = FRAME_TYPE_DATA_CONFIRMED_UP;
fPort = mcpsRequest->Req.Confirmed.fPort;
fBuffer = mcpsRequest->Req.Confirmed.fBuffer;
fBufferSize = mcpsRequest->Req.Confirmed.fBufferSize;
datarate = mcpsRequest->Req.Confirmed.Datarate;
break;
}
case MCPS_PROPRIETARY:
{
readyToSend = true;
MacCtx.AckTimeoutRetries = 1;
macHdr.Bits.MType = FRAME_TYPE_PROPRIETARY;
fBuffer = mcpsRequest->Req.Proprietary.fBuffer;
fBufferSize = mcpsRequest->Req.Proprietary.fBufferSize;
datarate = mcpsRequest->Req.Proprietary.Datarate;
break;
}
default:
break;
}
// Get the minimum possible datarate
getPhy.Attribute = PHY_MIN_TX_DR;
getPhy.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
phyParam = RegionGetPhyParam( MacCtx.NvmCtx->Region, &getPhy );
// Apply the minimum possible datarate.
// Some regions have limitations for the minimum datarate.
datarate = MAX( datarate, ( int8_t )phyParam.Value );
if( readyToSend == true )
{
if( MacCtx.NvmCtx->AdrCtrlOn == false )
{
verify.DatarateParams.Datarate = datarate;
verify.DatarateParams.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_TX_DR ) == true )
{
MacCtx.NvmCtx->MacParams.ChannelsDatarate = verify.DatarateParams.Datarate;
}
else
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
}
status = Send( &macHdr, fPort, fBuffer, fBufferSize );
if( status == LORAMAC_STATUS_OK )
{
MacCtx.McpsConfirm.McpsRequest = mcpsRequest->Type;
MacCtx.MacFlags.Bits.McpsReq = 1;
}
else
{
MacCtx.NodeAckRequested = false;
}
}
EventMacNvmCtxChanged( );
return status;
}
void LoRaMacTestSetDutyCycleOn( bool enable )
{
VerifyParams_t verify;
verify.DutyCycle = enable;
if( RegionVerify( MacCtx.NvmCtx->Region, &verify, PHY_DUTY_CYCLE ) == true )
{
MacCtx.NvmCtx->DutyCycleOn = enable;
}
}
LoRaMacStatus_t LoRaMacDeInitialization( void )
{
// Check the current state of the LoRaMac
if ( LoRaMacStop( ) == LORAMAC_STATUS_OK )
{
// Stop Timers
TimerStop( &MacCtx.TxDelayedTimer );
TimerStop( &MacCtx.RxWindowTimer1 );
TimerStop( &MacCtx.RxWindowTimer2 );
TimerStop( &MacCtx.AckTimeoutTimer );
// Take care about class B
LoRaMacClassBHaltBeaconing( );
// Reset Mac parameters
ResetMacParameters( );
// Switch off Radio
Radio.Sleep( );
// Return success
return LORAMAC_STATUS_OK;
}
else
{
return LORAMAC_STATUS_BUSY;
}
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_3927_0 |
crossvul-cpp_data_bad_1769_0 | /* -*- mode: c; c-file-style: "openbsd" -*- */
/*
* Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "lldpd.h"
#include "frame.h"
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
inline static int
lldpd_af_to_lldp_proto(int af)
{
switch (af) {
case LLDPD_AF_IPV4:
return LLDP_MGMT_ADDR_IP4;
case LLDPD_AF_IPV6:
return LLDP_MGMT_ADDR_IP6;
default:
return LLDP_MGMT_ADDR_NONE;
}
}
inline static int
lldpd_af_from_lldp_proto(int proto)
{
switch (proto) {
case LLDP_MGMT_ADDR_IP4:
return LLDPD_AF_IPV4;
case LLDP_MGMT_ADDR_IP6:
return LLDPD_AF_IPV6;
default:
return LLDPD_AF_UNSPEC;
}
}
static int _lldp_send(struct lldpd *global,
struct lldpd_hardware *hardware,
u_int8_t c_id_subtype,
char *c_id,
int c_id_len,
u_int8_t p_id_subtype,
char *p_id,
int p_id_len,
int shutdown)
{
struct lldpd_port *port;
struct lldpd_chassis *chassis;
struct lldpd_frame *frame;
int length;
u_int8_t *packet, *pos, *tlv;
struct lldpd_mgmt *mgmt;
int proto;
u_int8_t mcastaddr[] = LLDP_MULTICAST_ADDR;
#ifdef ENABLE_DOT1
const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1;
struct lldpd_vlan *vlan;
struct lldpd_ppvid *ppvid;
struct lldpd_pi *pi;
#endif
#ifdef ENABLE_DOT3
const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3;
#endif
#ifdef ENABLE_LLDPMED
int i;
const u_int8_t med[] = LLDP_TLV_ORG_MED;
#endif
#ifdef ENABLE_CUSTOM
struct lldpd_custom *custom;
#endif
port = &hardware->h_lport;
chassis = port->p_chassis;
length = hardware->h_mtu;
if ((packet = (u_int8_t*)calloc(1, length)) == NULL)
return ENOMEM;
pos = packet;
/* Ethernet header */
if (!(
/* LLDP multicast address */
POKE_BYTES(mcastaddr, sizeof(mcastaddr)) &&
/* Source MAC address */
POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) &&
/* LLDP frame */
POKE_UINT16(ETHERTYPE_LLDP)))
goto toobig;
/* Chassis ID */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) &&
POKE_UINT8(c_id_subtype) &&
POKE_BYTES(c_id, c_id_len) &&
POKE_END_LLDP_TLV))
goto toobig;
/* Port ID */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) &&
POKE_UINT8(p_id_subtype) &&
POKE_BYTES(p_id, p_id_len) &&
POKE_END_LLDP_TLV))
goto toobig;
/* Time to live */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_TTL) &&
POKE_UINT16(shutdown?0:chassis->c_ttl) &&
POKE_END_LLDP_TLV))
goto toobig;
if (shutdown)
goto end;
/* System name */
if (chassis->c_name && *chassis->c_name != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) &&
POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* System description (skip it if empty) */
if (chassis->c_descr && *chassis->c_descr != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) &&
POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* System capabilities */
if (global->g_config.c_cap_advertise && chassis->c_cap_available) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) &&
POKE_UINT16(chassis->c_cap_available) &&
POKE_UINT16(chassis->c_cap_enabled) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Management addresses */
TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) {
proto = lldpd_af_to_lldp_proto(mgmt->m_family);
assert(proto != LLDP_MGMT_ADDR_NONE);
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) &&
/* Size of the address, including its type */
POKE_UINT8(mgmt->m_addrsize + 1) &&
POKE_UINT8(proto) &&
POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize)))
goto toobig;
/* Interface port type, OID */
if (mgmt->m_iface == 0) {
if (!(
/* We don't know the management interface */
POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) &&
POKE_UINT32(0)))
goto toobig;
} else {
if (!(
/* We have the index of the management interface */
POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) &&
POKE_UINT32(mgmt->m_iface)))
goto toobig;
}
if (!(
/* We don't provide an OID for management */
POKE_UINT8(0) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Port description */
if (port->p_descr && *port->p_descr != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) &&
POKE_BYTES(port->p_descr, strlen(port->p_descr)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#ifdef ENABLE_DOT1
/* Port VLAN ID */
if(port->p_pvid != 0) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PVID) &&
POKE_UINT16(port->p_pvid) &&
POKE_END_LLDP_TLV)) {
goto toobig;
}
}
/* Port and Protocol VLAN IDs */
TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PPVID) &&
POKE_UINT8(ppvid->p_cap_status) &&
POKE_UINT16(ppvid->p_ppvid) &&
POKE_END_LLDP_TLV)) {
goto toobig;
}
}
/* VLANs */
TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) &&
POKE_UINT16(vlan->v_vid) &&
POKE_UINT8(strlen(vlan->v_name)) &&
POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Protocol Identities */
TAILQ_FOREACH(pi, &port->p_pids, p_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PI) &&
POKE_UINT8(pi->p_pi_len) &&
POKE_BYTES(pi->p_pi, pi->p_pi_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#endif
#ifdef ENABLE_DOT3
/* Aggregation status */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_LA) &&
/* Bit 0 = capability ; Bit 1 = status */
POKE_UINT8((port->p_aggregid) ? 3:1) &&
POKE_UINT32(port->p_aggregid) &&
POKE_END_LLDP_TLV))
goto toobig;
/* MAC/PHY */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_MAC) &&
POKE_UINT8(port->p_macphy.autoneg_support |
(port->p_macphy.autoneg_enabled << 1)) &&
POKE_UINT16(port->p_macphy.autoneg_advertised) &&
POKE_UINT16(port->p_macphy.mau_type) &&
POKE_END_LLDP_TLV))
goto toobig;
/* MFS */
if (port->p_mfs) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_MFS) &&
POKE_UINT16(port->p_mfs) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Power */
if (port->p_power.devicetype) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_POWER) &&
POKE_UINT8((
(((2 - port->p_power.devicetype) %(1<< 1))<<0) |
(( port->p_power.supported %(1<< 1))<<1) |
(( port->p_power.enabled %(1<< 1))<<2) |
(( port->p_power.paircontrol %(1<< 1))<<3))) &&
POKE_UINT8(port->p_power.pairs) &&
POKE_UINT8(port->p_power.class)))
goto toobig;
/* 802.3at */
if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) {
if (!(
POKE_UINT8((
(((port->p_power.powertype ==
LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) |
(((port->p_power.devicetype ==
LLDP_DOT3_POWER_PSE)?0:1) << 6) |
((port->p_power.source %(1<< 2))<<4) |
((port->p_power.priority %(1<< 2))<<0))) &&
POKE_UINT16(port->p_power.requested) &&
POKE_UINT16(port->p_power.allocated)))
goto toobig;
}
if (!(POKE_END_LLDP_TLV))
goto toobig;
}
#endif
#ifdef ENABLE_LLDPMED
if (port->p_med_cap_enabled) {
/* LLDP-MED cap */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_CAP) &&
POKE_UINT16(chassis->c_med_cap_available) &&
POKE_UINT8(chassis->c_med_type) &&
POKE_END_LLDP_TLV))
goto toobig;
/* LLDP-MED inventory */
#define LLDP_INVENTORY(value, subtype) \
if (value) { \
if (!( \
POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \
POKE_BYTES(med, sizeof(med)) && \
POKE_UINT8(subtype) && \
POKE_BYTES(value, \
(strlen(value)>32)?32:strlen(value)) && \
POKE_END_LLDP_TLV)) \
goto toobig; \
}
if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) {
LLDP_INVENTORY(chassis->c_med_hw,
LLDP_TLV_MED_IV_HW);
LLDP_INVENTORY(chassis->c_med_fw,
LLDP_TLV_MED_IV_FW);
LLDP_INVENTORY(chassis->c_med_sw,
LLDP_TLV_MED_IV_SW);
LLDP_INVENTORY(chassis->c_med_sn,
LLDP_TLV_MED_IV_SN);
LLDP_INVENTORY(chassis->c_med_manuf,
LLDP_TLV_MED_IV_MANUF);
LLDP_INVENTORY(chassis->c_med_model,
LLDP_TLV_MED_IV_MODEL);
LLDP_INVENTORY(chassis->c_med_asset,
LLDP_TLV_MED_IV_ASSET);
}
/* LLDP-MED location */
for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) {
if (port->p_med_location[i].format == i + 1) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_LOCATION) &&
POKE_UINT8(port->p_med_location[i].format) &&
POKE_BYTES(port->p_med_location[i].data,
port->p_med_location[i].data_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
/* LLDP-MED network policy */
for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) {
if (port->p_med_policy[i].type == i + 1) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_POLICY) &&
POKE_UINT32((
((port->p_med_policy[i].type %(1<< 8))<<24) |
((port->p_med_policy[i].unknown %(1<< 1))<<23) |
((port->p_med_policy[i].tagged %(1<< 1))<<22) |
/*((0 %(1<< 1))<<21) |*/
((port->p_med_policy[i].vid %(1<<12))<< 9) |
((port->p_med_policy[i].priority %(1<< 3))<< 6) |
((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
/* LLDP-MED POE-MDI */
if ((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) ||
(port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) {
int devicetype = 0, source = 0;
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_MDI)))
goto toobig;
switch (port->p_med_power.devicetype) {
case LLDP_MED_POW_TYPE_PSE:
devicetype = 0;
switch (port->p_med_power.source) {
case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break;
case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break;
case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break;
default: source = 0; break;
}
break;
case LLDP_MED_POW_TYPE_PD:
devicetype = 1;
switch (port->p_med_power.source) {
case LLDP_MED_POW_SOURCE_PSE: source = 1; break;
case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break;
case LLDP_MED_POW_SOURCE_BOTH: source = 3; break;
default: source = 0; break;
}
break;
}
if (!(
POKE_UINT8((
((devicetype %(1<< 2))<<6) |
((source %(1<< 2))<<4) |
((port->p_med_power.priority %(1<< 4))<<0) )) &&
POKE_UINT16(port->p_med_power.val) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
#endif
#ifdef ENABLE_CUSTOM
TAILQ_FOREACH(custom, &port->p_custom_list, next) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(custom->oui, sizeof(custom->oui)) &&
POKE_UINT8(custom->subtype) &&
POKE_BYTES(custom->oui_info, custom->oui_info_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#endif
end:
/* END */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_END) &&
POKE_END_LLDP_TLV))
goto toobig;
if (interfaces_send_helper(global, hardware,
(char *)packet, pos - packet) == -1) {
log_warn("lldp", "unable to send packet on real device for %s",
hardware->h_ifname);
free(packet);
return ENETDOWN;
}
hardware->h_tx_cnt++;
/* We assume that LLDP frame is the reference */
if (!shutdown && (frame = (struct lldpd_frame*)malloc(
sizeof(int) + pos - packet)) != NULL) {
frame->size = pos - packet;
memcpy(&frame->frame, packet, frame->size);
if ((hardware->h_lport.p_lastframe == NULL) ||
(hardware->h_lport.p_lastframe->size != frame->size) ||
(memcmp(hardware->h_lport.p_lastframe->frame, frame->frame,
frame->size) != 0)) {
free(hardware->h_lport.p_lastframe);
hardware->h_lport.p_lastframe = frame;
hardware->h_lport.p_lastchange = time(NULL);
} else free(frame);
}
free(packet);
return 0;
toobig:
free(packet);
return E2BIG;
}
/* Send a shutdown LLDPDU. */
int
lldp_send_shutdown(struct lldpd *global,
struct lldpd_hardware *hardware)
{
if (hardware->h_lchassis_previous_id == NULL ||
hardware->h_lport_previous_id == NULL)
return 0;
return _lldp_send(global, hardware,
hardware->h_lchassis_previous_id_subtype,
hardware->h_lchassis_previous_id,
hardware->h_lchassis_previous_id_len,
hardware->h_lport_previous_id_subtype,
hardware->h_lport_previous_id,
hardware->h_lport_previous_id_len,
1);
}
int
lldp_send(struct lldpd *global,
struct lldpd_hardware *hardware)
{
struct lldpd_port *port = &hardware->h_lport;
struct lldpd_chassis *chassis = port->p_chassis;
int ret;
/* Check if we have a change. */
if (hardware->h_lchassis_previous_id != NULL &&
hardware->h_lport_previous_id != NULL &&
(hardware->h_lchassis_previous_id_subtype != chassis->c_id_subtype ||
hardware->h_lchassis_previous_id_len != chassis->c_id_len ||
hardware->h_lport_previous_id_subtype != port->p_id_subtype ||
hardware->h_lport_previous_id_len != port->p_id_len ||
memcmp(hardware->h_lchassis_previous_id,
chassis->c_id, chassis->c_id_len) ||
memcmp(hardware->h_lport_previous_id,
port->p_id, port->p_id_len))) {
log_info("lldp", "MSAP has changed for port %s, sending a shutdown LLDPDU",
hardware->h_ifname);
if ((ret = lldp_send_shutdown(global, hardware)) != 0)
return ret;
}
log_debug("lldp", "send LLDP PDU to %s",
hardware->h_ifname);
if ((ret = _lldp_send(global, hardware,
chassis->c_id_subtype,
chassis->c_id,
chassis->c_id_len,
port->p_id_subtype,
port->p_id,
port->p_id_len,
0)) != 0)
return ret;
/* Record current chassis and port ID */
free(hardware->h_lchassis_previous_id);
hardware->h_lchassis_previous_id_subtype = chassis->c_id_subtype;
hardware->h_lchassis_previous_id_len = chassis->c_id_len;
if ((hardware->h_lchassis_previous_id = malloc(chassis->c_id_len)) != NULL)
memcpy(hardware->h_lchassis_previous_id, chassis->c_id,
chassis->c_id_len);
free(hardware->h_lport_previous_id);
hardware->h_lport_previous_id_subtype = port->p_id_subtype;
hardware->h_lport_previous_id_len = port->p_id_len;
if ((hardware->h_lport_previous_id = malloc(port->p_id_len)) != NULL)
memcpy(hardware->h_lport_previous_id, port->p_id,
port->p_id_len);
return 0;
}
#define CHECK_TLV_SIZE(x, name) \
do { if (tlv_size < (x)) { \
log_warnx("lldp", name " TLV too short received on %s", \
hardware->h_ifname); \
goto malformed; \
} } while (0)
int
lldp_decode(struct lldpd *cfg, char *frame, int s,
struct lldpd_hardware *hardware,
struct lldpd_chassis **newchassis, struct lldpd_port **newport)
{
struct lldpd_chassis *chassis;
struct lldpd_port *port;
const char lldpaddr[] = LLDP_MULTICAST_ADDR;
const char dot1[] = LLDP_TLV_ORG_DOT1;
const char dot3[] = LLDP_TLV_ORG_DOT3;
const char med[] = LLDP_TLV_ORG_MED;
const char dcbx[] = LLDP_TLV_ORG_DCBX;
unsigned char orgid[3];
int length, gotend = 0, ttl_received = 0;
int tlv_size, tlv_type, tlv_subtype;
u_int8_t *pos, *tlv;
char *b;
#ifdef ENABLE_DOT1
struct lldpd_vlan *vlan = NULL;
int vlan_len;
struct lldpd_ppvid *ppvid;
struct lldpd_pi *pi = NULL;
#endif
struct lldpd_mgmt *mgmt;
int af;
u_int8_t addr_str_length, addr_str_buffer[32];
u_int8_t addr_family, addr_length, *addr_ptr, iface_subtype;
u_int32_t iface_number, iface;
#ifdef ENABLE_CUSTOM
struct lldpd_custom *custom = NULL;
#endif
log_debug("lldp", "receive LLDP PDU on %s",
hardware->h_ifname);
if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) {
log_warn("lldp", "failed to allocate remote chassis");
return -1;
}
TAILQ_INIT(&chassis->c_mgmt);
if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) {
log_warn("lldp", "failed to allocate remote port");
free(chassis);
return -1;
}
#ifdef ENABLE_DOT1
TAILQ_INIT(&port->p_vlans);
TAILQ_INIT(&port->p_ppvids);
TAILQ_INIT(&port->p_pids);
#endif
#ifdef ENABLE_CUSTOM
TAILQ_INIT(&port->p_custom_list);
#endif
length = s;
pos = (u_int8_t*)frame;
if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t)) {
log_warnx("lldp", "too short frame received on %s", hardware->h_ifname);
goto malformed;
}
if (PEEK_CMP(lldpaddr, ETHER_ADDR_LEN) != 0) {
log_info("lldp", "frame not targeted at LLDP multicast address received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_DISCARD(ETHER_ADDR_LEN); /* Skip source address */
if (PEEK_UINT16 != ETHERTYPE_LLDP) {
log_info("lldp", "non LLDP frame received on %s",
hardware->h_ifname);
goto malformed;
}
while (length && (!gotend)) {
if (length < 2) {
log_warnx("lldp", "tlv header too short received on %s",
hardware->h_ifname);
goto malformed;
}
tlv_size = PEEK_UINT16;
tlv_type = tlv_size >> 9;
tlv_size = tlv_size & 0x1ff;
(void)PEEK_SAVE(tlv);
if (length < tlv_size) {
log_warnx("lldp", "frame too short for tlv received on %s",
hardware->h_ifname);
goto malformed;
}
switch (tlv_type) {
case LLDP_TLV_END:
if (tlv_size != 0) {
log_warnx("lldp", "lldp end received with size not null on %s",
hardware->h_ifname);
goto malformed;
}
if (length)
log_debug("lldp", "extra data after lldp end on %s",
hardware->h_ifname);
gotend = 1;
break;
case LLDP_TLV_CHASSIS_ID:
case LLDP_TLV_PORT_ID:
CHECK_TLV_SIZE(2, "Port Id");
tlv_subtype = PEEK_UINT8;
if ((tlv_subtype == 0) || (tlv_subtype > 7)) {
log_warnx("lldp", "unknown subtype for tlv id received on %s",
hardware->h_ifname);
goto malformed;
}
if ((b = (char *)calloc(1, tlv_size - 1)) == NULL) {
log_warn("lldp", "unable to allocate memory for id tlv "
"received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(b, tlv_size - 1);
if (tlv_type == LLDP_TLV_PORT_ID) {
port->p_id_subtype = tlv_subtype;
port->p_id = b;
port->p_id_len = tlv_size - 1;
} else {
chassis->c_id_subtype = tlv_subtype;
chassis->c_id = b;
chassis->c_id_len = tlv_size - 1;
}
break;
case LLDP_TLV_TTL:
CHECK_TLV_SIZE(2, "TTL");
chassis->c_ttl = PEEK_UINT16;
ttl_received = 1;
break;
case LLDP_TLV_PORT_DESCR:
case LLDP_TLV_SYSTEM_NAME:
case LLDP_TLV_SYSTEM_DESCR:
if (tlv_size < 1) {
log_debug("lldp", "empty tlv received on %s",
hardware->h_ifname);
break;
}
if ((b = (char *)calloc(1, tlv_size + 1)) == NULL) {
log_warn("lldp", "unable to allocate memory for string tlv "
"received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(b, tlv_size);
if (tlv_type == LLDP_TLV_PORT_DESCR)
port->p_descr = b;
else if (tlv_type == LLDP_TLV_SYSTEM_NAME)
chassis->c_name = b;
else chassis->c_descr = b;
break;
case LLDP_TLV_SYSTEM_CAP:
CHECK_TLV_SIZE(4, "System capabilities");
chassis->c_cap_available = PEEK_UINT16;
chassis->c_cap_enabled = PEEK_UINT16;
break;
case LLDP_TLV_MGMT_ADDR:
CHECK_TLV_SIZE(1, "Management address");
addr_str_length = PEEK_UINT8;
CHECK_TLV_SIZE(1 + addr_str_length, "Management address");
PEEK_BYTES(addr_str_buffer, addr_str_length);
addr_length = addr_str_length - 1;
addr_family = addr_str_buffer[0];
addr_ptr = &addr_str_buffer[1];
CHECK_TLV_SIZE(1 + addr_str_length + 5, "Management address");
iface_subtype = PEEK_UINT8;
iface_number = PEEK_UINT32;
af = lldpd_af_from_lldp_proto(addr_family);
if (af == LLDPD_AF_UNSPEC)
break;
if (iface_subtype == LLDP_MGMT_IFACE_IFINDEX)
iface = iface_number;
else
iface = 0;
mgmt = lldpd_alloc_mgmt(af, addr_ptr, addr_length, iface);
if (mgmt == NULL) {
assert(errno == ENOMEM);
log_warn("lldp", "unable to allocate memory "
"for management address");
goto malformed;
}
TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries);
break;
case LLDP_TLV_ORG:
CHECK_TLV_SIZE(4, "Organisational");
PEEK_BYTES(orgid, sizeof(orgid));
tlv_subtype = PEEK_UINT8;
if (memcmp(dot1, orgid, sizeof(orgid)) == 0) {
#ifndef ENABLE_DOT1
hardware->h_rx_unrecognized_cnt++;
#else
/* Dot1 */
switch (tlv_subtype) {
case LLDP_TLV_DOT1_VLANNAME:
CHECK_TLV_SIZE(7, "VLAN");
if ((vlan = (struct lldpd_vlan *)calloc(1,
sizeof(struct lldpd_vlan))) == NULL) {
log_warn("lldp", "unable to alloc vlan "
"structure for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
vlan->v_vid = PEEK_UINT16;
vlan_len = PEEK_UINT8;
CHECK_TLV_SIZE(7 + vlan_len, "VLAN");
if ((vlan->v_name =
(char *)calloc(1, vlan_len + 1)) == NULL) {
log_warn("lldp", "unable to alloc vlan name for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(vlan->v_name, vlan_len);
TAILQ_INSERT_TAIL(&port->p_vlans,
vlan, v_entries);
vlan = NULL;
break;
case LLDP_TLV_DOT1_PVID:
CHECK_TLV_SIZE(6, "PVID");
port->p_pvid = PEEK_UINT16;
break;
case LLDP_TLV_DOT1_PPVID:
CHECK_TLV_SIZE(7, "PPVID");
/* validation needed */
/* PPVID has to be unique if more than
one PPVID TLVs are received -
discard if duplicate */
/* if support bit is not set and
enabled bit is set - PPVID TLV is
considered error and discarded */
/* if PPVID > 4096 - bad and discard */
if ((ppvid = (struct lldpd_ppvid *)calloc(1,
sizeof(struct lldpd_ppvid))) == NULL) {
log_warn("lldp", "unable to alloc ppvid "
"structure for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
ppvid->p_cap_status = PEEK_UINT8;
ppvid->p_ppvid = PEEK_UINT16;
TAILQ_INSERT_TAIL(&port->p_ppvids,
ppvid, p_entries);
break;
case LLDP_TLV_DOT1_PI:
/* validation needed */
/* PI has to be unique if more than
one PI TLVs are received - discard
if duplicate ?? */
CHECK_TLV_SIZE(5, "PI");
if ((pi = (struct lldpd_pi *)calloc(1,
sizeof(struct lldpd_pi))) == NULL) {
log_warn("lldp", "unable to alloc PI "
"structure for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
pi->p_pi_len = PEEK_UINT8;
CHECK_TLV_SIZE(5 + pi->p_pi_len, "PI");
if ((pi->p_pi =
(char *)calloc(1, pi->p_pi_len)) == NULL) {
log_warn("lldp", "unable to alloc pid name for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(pi->p_pi, pi->p_pi_len);
TAILQ_INSERT_TAIL(&port->p_pids,
pi, p_entries);
pi = NULL;
break;
default:
/* Unknown Dot1 TLV, ignore it */
hardware->h_rx_unrecognized_cnt++;
}
#endif
} else if (memcmp(dot3, orgid, sizeof(orgid)) == 0) {
#ifndef ENABLE_DOT3
hardware->h_rx_unrecognized_cnt++;
#else
/* Dot3 */
switch (tlv_subtype) {
case LLDP_TLV_DOT3_MAC:
CHECK_TLV_SIZE(9, "MAC/PHY");
port->p_macphy.autoneg_support = PEEK_UINT8;
port->p_macphy.autoneg_enabled =
(port->p_macphy.autoneg_support & 0x2) >> 1;
port->p_macphy.autoneg_support =
port->p_macphy.autoneg_support & 0x1;
port->p_macphy.autoneg_advertised =
PEEK_UINT16;
port->p_macphy.mau_type = PEEK_UINT16;
break;
case LLDP_TLV_DOT3_LA:
CHECK_TLV_SIZE(9, "Link aggregation");
PEEK_DISCARD_UINT8;
port->p_aggregid = PEEK_UINT32;
break;
case LLDP_TLV_DOT3_MFS:
CHECK_TLV_SIZE(6, "MFS");
port->p_mfs = PEEK_UINT16;
break;
case LLDP_TLV_DOT3_POWER:
CHECK_TLV_SIZE(7, "Power");
port->p_power.devicetype = PEEK_UINT8;
port->p_power.supported =
(port->p_power.devicetype & 0x2) >> 1;
port->p_power.enabled =
(port->p_power.devicetype & 0x4) >> 2;
port->p_power.paircontrol =
(port->p_power.devicetype & 0x8) >> 3;
port->p_power.devicetype =
(port->p_power.devicetype & 0x1)?
LLDP_DOT3_POWER_PSE:LLDP_DOT3_POWER_PD;
port->p_power.pairs = PEEK_UINT8;
port->p_power.class = PEEK_UINT8;
/* 802.3at? */
if (tlv_size >= 12) {
port->p_power.powertype = PEEK_UINT8;
port->p_power.source =
(port->p_power.powertype & (1<<5 | 1<<4)) >> 4;
port->p_power.priority =
(port->p_power.powertype & (1<<1 | 1<<0));
port->p_power.powertype =
(port->p_power.powertype & (1<<7))?
LLDP_DOT3_POWER_8023AT_TYPE1:
LLDP_DOT3_POWER_8023AT_TYPE2;
port->p_power.requested = PEEK_UINT16;
port->p_power.allocated = PEEK_UINT16;
} else
port->p_power.powertype =
LLDP_DOT3_POWER_8023AT_OFF;
break;
default:
/* Unknown Dot3 TLV, ignore it */
hardware->h_rx_unrecognized_cnt++;
}
#endif
} else if (memcmp(med, orgid, sizeof(orgid)) == 0) {
/* LLDP-MED */
#ifndef ENABLE_LLDPMED
hardware->h_rx_unrecognized_cnt++;
#else
u_int32_t policy;
unsigned loctype;
unsigned power;
switch (tlv_subtype) {
case LLDP_TLV_MED_CAP:
CHECK_TLV_SIZE(7, "LLDP-MED capabilities");
chassis->c_med_cap_available = PEEK_UINT16;
chassis->c_med_type = PEEK_UINT8;
port->p_med_cap_enabled |=
LLDP_MED_CAP_CAP;
break;
case LLDP_TLV_MED_POLICY:
CHECK_TLV_SIZE(8, "LLDP-MED policy");
policy = PEEK_UINT32;
if (((policy >> 24) < 1) ||
((policy >> 24) > LLDP_MED_APPTYPE_LAST)) {
log_info("lldp", "unknown policy field %d "
"received on %s",
policy,
hardware->h_ifname);
break;
}
port->p_med_policy[(policy >> 24) - 1].type =
(policy >> 24);
port->p_med_policy[(policy >> 24) - 1].unknown =
((policy & 0x800000) != 0);
port->p_med_policy[(policy >> 24) - 1].tagged =
((policy & 0x400000) != 0);
port->p_med_policy[(policy >> 24) - 1].vid =
(policy & 0x001FFE00) >> 9;
port->p_med_policy[(policy >> 24) - 1].priority =
(policy & 0x1C0) >> 6;
port->p_med_policy[(policy >> 24) - 1].dscp =
policy & 0x3F;
port->p_med_cap_enabled |=
LLDP_MED_CAP_POLICY;
break;
case LLDP_TLV_MED_LOCATION:
CHECK_TLV_SIZE(5, "LLDP-MED Location");
loctype = PEEK_UINT8;
if ((loctype < 1) ||
(loctype > LLDP_MED_LOCFORMAT_LAST)) {
log_info("lldp", "unknown location type "
"received on %s",
hardware->h_ifname);
break;
}
if ((port->p_med_location[loctype - 1].data =
(char*)malloc(tlv_size - 5)) == NULL) {
log_warn("lldp", "unable to allocate memory "
"for LLDP-MED location for "
"frame received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(port->p_med_location[loctype - 1].data,
tlv_size - 5);
port->p_med_location[loctype - 1].data_len =
tlv_size - 5;
port->p_med_location[loctype - 1].format = loctype;
port->p_med_cap_enabled |=
LLDP_MED_CAP_LOCATION;
break;
case LLDP_TLV_MED_MDI:
CHECK_TLV_SIZE(7, "LLDP-MED PoE-MDI");
power = PEEK_UINT8;
switch (power & 0xC0) {
case 0x0:
port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PSE;
port->p_med_cap_enabled |=
LLDP_MED_CAP_MDI_PSE;
switch (power & 0x30) {
case 0x0:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_UNKNOWN;
break;
case 0x10:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_PRIMARY;
break;
case 0x20:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_BACKUP;
break;
default:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_RESERVED;
}
break;
case 0x40:
port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PD;
port->p_med_cap_enabled |=
LLDP_MED_CAP_MDI_PD;
switch (power & 0x30) {
case 0x0:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_UNKNOWN;
break;
case 0x10:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_PSE;
break;
case 0x20:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_LOCAL;
break;
default:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_BOTH;
}
break;
default:
port->p_med_power.devicetype =
LLDP_MED_POW_TYPE_RESERVED;
}
if ((power & 0x0F) > LLDP_MED_POW_PRIO_LOW)
port->p_med_power.priority =
LLDP_MED_POW_PRIO_UNKNOWN;
else
port->p_med_power.priority =
power & 0x0F;
port->p_med_power.val = PEEK_UINT16;
break;
case LLDP_TLV_MED_IV_HW:
case LLDP_TLV_MED_IV_SW:
case LLDP_TLV_MED_IV_FW:
case LLDP_TLV_MED_IV_SN:
case LLDP_TLV_MED_IV_MANUF:
case LLDP_TLV_MED_IV_MODEL:
case LLDP_TLV_MED_IV_ASSET:
if (tlv_size <= 4)
b = NULL;
else {
if ((b = (char*)malloc(tlv_size - 3)) ==
NULL) {
log_warn("lldp", "unable to allocate "
"memory for LLDP-MED "
"inventory for frame "
"received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(b, tlv_size - 4);
b[tlv_size - 4] = '\0';
}
switch (tlv_subtype) {
case LLDP_TLV_MED_IV_HW:
chassis->c_med_hw = b;
break;
case LLDP_TLV_MED_IV_FW:
chassis->c_med_fw = b;
break;
case LLDP_TLV_MED_IV_SW:
chassis->c_med_sw = b;
break;
case LLDP_TLV_MED_IV_SN:
chassis->c_med_sn = b;
break;
case LLDP_TLV_MED_IV_MANUF:
chassis->c_med_manuf = b;
break;
case LLDP_TLV_MED_IV_MODEL:
chassis->c_med_model = b;
break;
case LLDP_TLV_MED_IV_ASSET:
chassis->c_med_asset = b;
break;
}
port->p_med_cap_enabled |=
LLDP_MED_CAP_IV;
break;
default:
/* Unknown LLDP MED, ignore it */
hardware->h_rx_unrecognized_cnt++;
}
#endif /* ENABLE_LLDPMED */
} else if (memcmp(dcbx, orgid, sizeof(orgid)) == 0) {
log_debug("lldp", "unsupported DCBX tlv received on %s - ignore",
hardware->h_ifname);
hardware->h_rx_unrecognized_cnt++;
} else {
log_debug("lldp", "unknown org tlv [%02x:%02x:%02x] received on %s",
orgid[0], orgid[1], orgid[2],
hardware->h_ifname);
hardware->h_rx_unrecognized_cnt++;
#ifdef ENABLE_CUSTOM
custom = (struct lldpd_custom*)calloc(1, sizeof(struct lldpd_custom));
if (!custom) {
log_warn("lldp",
"unable to allocate memory for custom TLV");
goto malformed;
}
custom->oui_info_len = tlv_size > 4 ? tlv_size - 4 : 0;
memcpy(custom->oui, orgid, sizeof(custom->oui));
custom->subtype = tlv_subtype;
if (custom->oui_info_len > 0) {
custom->oui_info = malloc(custom->oui_info_len);
if (!custom->oui_info) {
log_warn("lldp",
"unable to allocate memory for custom TLV data");
goto malformed;
}
PEEK_BYTES(custom->oui_info, custom->oui_info_len);
}
TAILQ_INSERT_TAIL(&port->p_custom_list, custom, next);
custom = NULL;
#endif
}
break;
default:
log_warnx("lldp", "unknown tlv (%d) received on %s",
tlv_type, hardware->h_ifname);
goto malformed;
}
if (pos > tlv + tlv_size) {
log_warnx("lldp", "BUG: already past TLV!");
goto malformed;
}
PEEK_DISCARD(tlv + tlv_size - pos);
}
/* Some random check */
if ((chassis->c_id == NULL) ||
(port->p_id == NULL) ||
(!ttl_received) ||
(gotend == 0)) {
log_warnx("lldp", "some mandatory tlv are missing for frame received on %s",
hardware->h_ifname);
goto malformed;
}
*newchassis = chassis;
*newport = port;
return 1;
malformed:
#ifdef ENABLE_CUSTOM
free(custom);
#endif
#ifdef ENABLE_DOT1
free(vlan);
free(pi);
#endif
lldpd_chassis_cleanup(chassis, 1);
lldpd_port_cleanup(port, 1);
free(port);
return -1;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_1769_0 |
crossvul-cpp_data_bad_4523_3 | /* NetHack 3.6 unixmain.c $NHDT-Date: 1570408210 2019/10/07 00:30:10 $ $NHDT-Branch: NetHack-3.6 $:$NHDT-Revision: 1.70 $ */
/* Copyright (c) Stichting Mathematisch Centrum, Amsterdam, 1985. */
/*-Copyright (c) Robert Patrick Rankin, 2011. */
/* NetHack may be freely redistributed. See license for details. */
/* main.c - Unix NetHack */
#include "hack.h"
#include "dlb.h"
#include <ctype.h>
#include <sys/stat.h>
#include <signal.h>
#include <pwd.h>
#ifndef O_RDONLY
#include <fcntl.h>
#endif
#if !defined(_BULL_SOURCE) && !defined(__sgi) && !defined(_M_UNIX)
#if !defined(SUNOS4) && !(defined(ULTRIX) && defined(__GNUC__))
#if defined(POSIX_TYPES) || defined(SVR4) || defined(HPUX)
extern struct passwd *FDECL(getpwuid, (uid_t));
#else
extern struct passwd *FDECL(getpwuid, (int));
#endif
#endif
#endif
extern struct passwd *FDECL(getpwnam, (const char *));
#ifdef CHDIR
static void FDECL(chdirx, (const char *, BOOLEAN_P));
#endif /* CHDIR */
static boolean NDECL(whoami);
static void FDECL(process_options, (int, char **));
#ifdef _M_UNIX
extern void NDECL(check_sco_console);
extern void NDECL(init_sco_cons);
#endif
#ifdef __linux__
extern void NDECL(check_linux_console);
extern void NDECL(init_linux_cons);
#endif
static void NDECL(wd_message);
static boolean wiz_error_flag = FALSE;
static struct passwd *NDECL(get_unix_pw);
int
main(argc, argv)
int argc;
char *argv[];
{
register int fd;
#ifdef CHDIR
register char *dir;
#endif
boolean exact_username;
boolean resuming = FALSE; /* assume new game */
boolean plsel_once = FALSE;
sys_early_init();
#if defined(__APPLE__)
{
/* special hack to change working directory to a resource fork when
running from finder --sam */
#define MAC_PATH_VALUE ".app/Contents/MacOS/"
char mac_cwd[1024], *mac_exe = argv[0], *mac_tmp;
int arg0_len = strlen(mac_exe), mac_tmp_len, mac_lhs_len = 0;
getcwd(mac_cwd, 1024);
if (mac_exe[0] == '/' && !strcmp(mac_cwd, "/")) {
if ((mac_exe = strrchr(mac_exe, '/')))
mac_exe++;
else
mac_exe = argv[0];
mac_tmp_len = (strlen(mac_exe) * 2) + strlen(MAC_PATH_VALUE);
if (mac_tmp_len <= arg0_len) {
mac_tmp = malloc(mac_tmp_len + 1);
sprintf(mac_tmp, "%s%s%s", mac_exe, MAC_PATH_VALUE, mac_exe);
if (!strcmp(argv[0] + (arg0_len - mac_tmp_len), mac_tmp)) {
mac_lhs_len =
(arg0_len - mac_tmp_len) + strlen(mac_exe) + 5;
if (mac_lhs_len > mac_tmp_len - 1)
mac_tmp = realloc(mac_tmp, mac_lhs_len);
strncpy(mac_tmp, argv[0], mac_lhs_len);
mac_tmp[mac_lhs_len] = '\0';
chdir(mac_tmp);
}
free(mac_tmp);
}
}
}
#endif
hname = argv[0];
hackpid = getpid();
(void) umask(0777 & ~FCMASK);
choose_windows(DEFAULT_WINDOW_SYS);
#ifdef CHDIR /* otherwise no chdir() */
/*
* See if we must change directory to the playground.
* (Perhaps hack runs suid and playground is inaccessible
* for the player.)
* The environment variable HACKDIR is overridden by a
* -d command line option (must be the first option given).
*/
dir = nh_getenv("NETHACKDIR");
if (!dir)
dir = nh_getenv("HACKDIR");
if (argc > 1) {
if (argcheck(argc, argv, ARG_VERSION) == 2)
exit(EXIT_SUCCESS);
if (argcheck(argc, argv, ARG_SHOWPATHS) == 2) {
#ifdef CHDIR
chdirx((char *) 0, 0);
#endif
iflags.initoptions_noterminate = TRUE;
initoptions();
iflags.initoptions_noterminate = FALSE;
reveal_paths();
exit(EXIT_SUCCESS);
}
if (argcheck(argc, argv, ARG_DEBUG) == 1) {
argc--;
argv++;
}
if (argc > 1 && !strncmp(argv[1], "-d", 2) && argv[1][2] != 'e') {
/* avoid matching "-dec" for DECgraphics; since the man page
* says -d directory, hope nobody's using -desomething_else
*/
argc--;
argv++;
dir = argv[0] + 2;
if (*dir == '=' || *dir == ':')
dir++;
if (!*dir && argc > 1) {
argc--;
argv++;
dir = argv[0];
}
if (!*dir)
error("Flag -d must be followed by a directory name.");
}
}
#endif /* CHDIR */
if (argc > 1) {
/*
* Now we know the directory containing 'record' and
* may do a prscore(). Exclude `-style' - it's a Qt option.
*/
if (!strncmp(argv[1], "-s", 2) && strncmp(argv[1], "-style", 6)) {
#ifdef CHDIR
chdirx(dir, 0);
#endif
#ifdef SYSCF
initoptions();
#endif
#ifdef PANICTRACE
ARGV0 = hname; /* save for possible stack trace */
#ifndef NO_SIGNAL
panictrace_setsignals(TRUE);
#endif
#endif
prscore(argc, argv);
/* FIXME: shouldn't this be using nh_terminate() to free
up any memory allocated by initoptions() */
exit(EXIT_SUCCESS);
}
} /* argc > 1 */
/*
* Change directories before we initialize the window system so
* we can find the tile file.
*/
#ifdef CHDIR
chdirx(dir, 1);
#endif
#ifdef _M_UNIX
check_sco_console();
#endif
#ifdef __linux__
check_linux_console();
#endif
initoptions();
#ifdef PANICTRACE
ARGV0 = hname; /* save for possible stack trace */
#ifndef NO_SIGNAL
panictrace_setsignals(TRUE);
#endif
#endif
exact_username = whoami();
/*
* It seems you really want to play.
*/
u.uhp = 1; /* prevent RIP on early quits */
program_state.preserve_locks = 1;
#ifndef NO_SIGNAL
sethanguphandler((SIG_RET_TYPE) hangup);
#endif
process_options(argc, argv); /* command line options */
#ifdef WINCHAIN
commit_windowchain();
#endif
init_nhwindows(&argc, argv); /* now we can set up window system */
#ifdef _M_UNIX
init_sco_cons();
#endif
#ifdef __linux__
init_linux_cons();
#endif
#ifdef DEF_PAGER
if (!(catmore = nh_getenv("HACKPAGER"))
&& !(catmore = nh_getenv("PAGER")))
catmore = DEF_PAGER;
#endif
#ifdef MAIL
getmailstatus();
#endif
/* wizard mode access is deferred until here */
set_playmode(); /* sets plname to "wizard" for wizard mode */
if (exact_username) {
/*
* FIXME: this no longer works, ever since 3.3.0
* when plnamesuffix() was changed to find
* Name-Role-Race-Gender-Alignment. It removes
* all dashes rather than just the last one,
* regardless of whether whatever follows each
* dash matches role, race, gender, or alignment.
*/
/* guard against user names with hyphens in them */
int len = (int) strlen(plname);
/* append the current role, if any, so that last dash is ours */
if (++len < (int) sizeof plname)
(void) strncat(strcat(plname, "-"), pl_character,
sizeof plname - len - 1);
}
/* strip role,race,&c suffix; calls askname() if plname[] is empty
or holds a generic user name like "player" or "games" */
plnamesuffix();
if (wizard) {
/* use character name rather than lock letter for file names */
locknum = 0;
} else {
/* suppress interrupts while processing lock file */
(void) signal(SIGQUIT, SIG_IGN);
(void) signal(SIGINT, SIG_IGN);
}
dlb_init(); /* must be before newgame() */
/*
* Initialize the vision system. This must be before mklev() on a
* new game or before a level restore on a saved game.
*/
vision_init();
display_gamewindows();
/*
* First, try to find and restore a save file for specified character.
* We'll return here if new game player_selection() renames the hero.
*/
attempt_restore:
/*
* getlock() complains and quits if there is already a game
* in progress for current character name (when locknum == 0)
* or if there are too many active games (when locknum > 0).
* When proceeding, it creates an empty <lockname>.0 file to
* designate the current game.
* getlock() constructs <lockname> based on the character
* name (for !locknum) or on first available of alock, block,
* clock, &c not currently in use in the playground directory
* (for locknum > 0).
*/
if (*plname) {
getlock();
program_state.preserve_locks = 0; /* after getlock() */
}
if (*plname && (fd = restore_saved_game()) >= 0) {
const char *fq_save = fqname(SAVEF, SAVEPREFIX, 1);
(void) chmod(fq_save, 0); /* disallow parallel restores */
#ifndef NO_SIGNAL
(void) signal(SIGINT, (SIG_RET_TYPE) done1);
#endif
#ifdef NEWS
if (iflags.news) {
display_file(NEWS, FALSE);
iflags.news = FALSE; /* in case dorecover() fails */
}
#endif
pline("Restoring save file...");
mark_synch(); /* flush output */
if (dorecover(fd)) {
resuming = TRUE; /* not starting new game */
wd_message();
if (discover || wizard) {
/* this seems like a candidate for paranoid_confirmation... */
if (yn("Do you want to keep the save file?") == 'n') {
(void) delete_savefile();
} else {
(void) chmod(fq_save, FCMASK); /* back to readable */
nh_compress(fq_save);
}
}
}
}
if (!resuming) {
boolean neednewlock = (!*plname);
/* new game: start by choosing role, race, etc;
player might change the hero's name while doing that,
in which case we try to restore under the new name
and skip selection this time if that didn't succeed */
if (!iflags.renameinprogress || iflags.defer_plname || neednewlock) {
if (!plsel_once)
player_selection();
plsel_once = TRUE;
if (neednewlock && *plname)
goto attempt_restore;
if (iflags.renameinprogress) {
/* player has renamed the hero while selecting role;
if locking alphabetically, the existing lock file
can still be used; otherwise, discard current one
and create another for the new character name */
if (!locknum) {
delete_levelfile(0); /* remove empty lock file */
getlock();
}
goto attempt_restore;
}
}
newgame();
wd_message();
}
/* moveloop() never returns but isn't flagged NORETURN */
moveloop(resuming);
exit(EXIT_SUCCESS);
/*NOTREACHED*/
return 0;
}
static void
process_options(argc, argv)
int argc;
char *argv[];
{
int i, l;
/*
* Process options.
*/
while (argc > 1 && argv[1][0] == '-') {
argv++;
argc--;
l = (int) strlen(*argv);
/* must supply at least 4 chars to match "-XXXgraphics" */
if (l < 4)
l = 4;
switch (argv[0][1]) {
case 'D':
case 'd':
if ((argv[0][1] == 'D' && !argv[0][2])
|| !strcmpi(*argv, "-debug")) {
wizard = TRUE, discover = FALSE;
} else if (!strncmpi(*argv, "-DECgraphics", l)) {
load_symset("DECGraphics", PRIMARY);
switch_symbols(TRUE);
} else {
raw_printf("Unknown option: %s", *argv);
}
break;
case 'X':
discover = TRUE, wizard = FALSE;
break;
#ifdef NEWS
case 'n':
iflags.news = FALSE;
break;
#endif
case 'u':
if (argv[0][2]) {
(void) strncpy(plname, argv[0] + 2, sizeof plname - 1);
} else if (argc > 1) {
argc--;
argv++;
(void) strncpy(plname, argv[0], sizeof plname - 1);
} else {
raw_print("Player name expected after -u");
}
break;
case 'I':
case 'i':
if (!strncmpi(*argv, "-IBMgraphics", l)) {
load_symset("IBMGraphics", PRIMARY);
load_symset("RogueIBM", ROGUESET);
switch_symbols(TRUE);
} else {
raw_printf("Unknown option: %s", *argv);
}
break;
case 'p': /* profession (role) */
if (argv[0][2]) {
if ((i = str2role(&argv[0][2])) >= 0)
flags.initrole = i;
} else if (argc > 1) {
argc--;
argv++;
if ((i = str2role(argv[0])) >= 0)
flags.initrole = i;
}
break;
case 'r': /* race */
if (argv[0][2]) {
if ((i = str2race(&argv[0][2])) >= 0)
flags.initrace = i;
} else if (argc > 1) {
argc--;
argv++;
if ((i = str2race(argv[0])) >= 0)
flags.initrace = i;
}
break;
case 'w': /* windowtype */
config_error_init(FALSE, "command line", FALSE);
choose_windows(&argv[0][2]);
config_error_done();
break;
case '@':
flags.randomall = 1;
break;
default:
if ((i = str2role(&argv[0][1])) >= 0) {
flags.initrole = i;
break;
}
/* else raw_printf("Unknown option: %s", *argv); */
}
}
#ifdef SYSCF
if (argc > 1)
raw_printf("MAXPLAYERS are set in sysconf file.\n");
#else
/* XXX This is deprecated in favor of SYSCF with MAXPLAYERS */
if (argc > 1)
locknum = atoi(argv[1]);
#endif
#ifdef MAX_NR_OF_PLAYERS
/* limit to compile-time limit */
if (!locknum || locknum > MAX_NR_OF_PLAYERS)
locknum = MAX_NR_OF_PLAYERS;
#endif
#ifdef SYSCF
/* let syscf override compile-time limit */
if (!locknum || (sysopt.maxplayers && locknum > sysopt.maxplayers))
locknum = sysopt.maxplayers;
#endif
}
#ifdef CHDIR
static void
chdirx(dir, wr)
const char *dir;
boolean wr;
{
if (dir /* User specified directory? */
#ifdef HACKDIR
&& strcmp(dir, HACKDIR) /* and not the default? */
#endif
) {
#ifdef SECURE
(void) setgid(getgid());
(void) setuid(getuid()); /* Ron Wessels */
#endif
} else {
/* non-default data files is a sign that scores may not be
* compatible, or perhaps that a binary not fitting this
* system's layout is being used.
*/
#ifdef VAR_PLAYGROUND
int len = strlen(VAR_PLAYGROUND);
fqn_prefix[SCOREPREFIX] = (char *) alloc(len + 2);
Strcpy(fqn_prefix[SCOREPREFIX], VAR_PLAYGROUND);
if (fqn_prefix[SCOREPREFIX][len - 1] != '/') {
fqn_prefix[SCOREPREFIX][len] = '/';
fqn_prefix[SCOREPREFIX][len + 1] = '\0';
}
#endif
}
#ifdef HACKDIR
if (dir == (const char *) 0)
dir = HACKDIR;
#endif
if (dir && chdir(dir) < 0) {
perror(dir);
error("Cannot chdir to %s.", dir);
}
/* warn the player if we can't write the record file
* perhaps we should also test whether . is writable
* unfortunately the access system-call is worthless.
*/
if (wr) {
#ifdef VAR_PLAYGROUND
fqn_prefix[LEVELPREFIX] = fqn_prefix[SCOREPREFIX];
fqn_prefix[SAVEPREFIX] = fqn_prefix[SCOREPREFIX];
fqn_prefix[BONESPREFIX] = fqn_prefix[SCOREPREFIX];
fqn_prefix[LOCKPREFIX] = fqn_prefix[SCOREPREFIX];
fqn_prefix[TROUBLEPREFIX] = fqn_prefix[SCOREPREFIX];
#endif
check_recordfile(dir);
}
}
#endif /* CHDIR */
/* returns True iff we set plname[] to username which contains a hyphen */
static boolean
whoami()
{
/*
* Who am i? Algorithm: 1. Use name as specified in NETHACKOPTIONS
* 2. Use $USER or $LOGNAME (if 1. fails)
* 3. Use getlogin() (if 2. fails)
* The resulting name is overridden by command line options.
* If everything fails, or if the resulting name is some generic
* account like "games", "play", "player", "hack" then eventually
* we'll ask him.
* Note that we trust the user here; it is possible to play under
* somebody else's name.
*/
if (!*plname) {
register const char *s;
s = nh_getenv("USER");
if (!s || !*s)
s = nh_getenv("LOGNAME");
if (!s || !*s)
s = getlogin();
if (s && *s) {
(void) strncpy(plname, s, sizeof plname - 1);
if (index(plname, '-'))
return TRUE;
}
}
return FALSE;
}
void
sethanguphandler(handler)
void FDECL((*handler), (int));
{
#ifdef SA_RESTART
/* don't want reads to restart. If SA_RESTART is defined, we know
* sigaction exists and can be used to ensure reads won't restart.
* If it's not defined, assume reads do not restart. If reads restart
* and a signal occurs, the game won't do anything until the read
* succeeds (or the stream returns EOF, which might not happen if
* reading from, say, a window manager). */
struct sigaction sact;
(void) memset((genericptr_t) &sact, 0, sizeof sact);
sact.sa_handler = (SIG_RET_TYPE) handler;
(void) sigaction(SIGHUP, &sact, (struct sigaction *) 0);
#ifdef SIGXCPU
(void) sigaction(SIGXCPU, &sact, (struct sigaction *) 0);
#endif
#else /* !SA_RESTART */
(void) signal(SIGHUP, (SIG_RET_TYPE) handler);
#ifdef SIGXCPU
(void) signal(SIGXCPU, (SIG_RET_TYPE) handler);
#endif
#endif /* ?SA_RESTART */
}
#ifdef PORT_HELP
void
port_help()
{
/*
* Display unix-specific help. Just show contents of the helpfile
* named by PORT_HELP.
*/
display_file(PORT_HELP, TRUE);
}
#endif
/* validate wizard mode if player has requested access to it */
boolean
authorize_wizard_mode()
{
struct passwd *pw = get_unix_pw();
if (pw && sysopt.wizards && sysopt.wizards[0]) {
if (check_user_string(sysopt.wizards))
return TRUE;
}
wiz_error_flag = TRUE; /* not being allowed into wizard mode */
return FALSE;
}
static void
wd_message()
{
if (wiz_error_flag) {
if (sysopt.wizards && sysopt.wizards[0]) {
char *tmp = build_english_list(sysopt.wizards);
pline("Only user%s %s may access debug (wizard) mode.",
index(sysopt.wizards, ' ') ? "s" : "", tmp);
free(tmp);
} else
pline("Entering explore/discovery mode instead.");
wizard = 0, discover = 1; /* (paranoia) */
} else if (discover)
You("are in non-scoring explore/discovery mode.");
}
/*
* Add a slash to any name not ending in /. There must
* be room for the /
*/
void
append_slash(name)
char *name;
{
char *ptr;
if (!*name)
return;
ptr = name + (strlen(name) - 1);
if (*ptr != '/') {
*++ptr = '/';
*++ptr = '\0';
}
return;
}
boolean
check_user_string(optstr)
char *optstr;
{
struct passwd *pw;
int pwlen;
char *eop, *w;
char *pwname = 0;
if (optstr[0] == '*')
return TRUE; /* allow any user */
if (sysopt.check_plname)
pwname = plname;
else if ((pw = get_unix_pw()) != 0)
pwname = pw->pw_name;
if (!pwname || !*pwname)
return FALSE;
pwlen = (int) strlen(pwname);
eop = eos(optstr);
w = optstr;
while (w + pwlen <= eop) {
if (!*w)
break;
if (isspace(*w)) {
w++;
continue;
}
if (!strncmp(w, pwname, pwlen)) {
if (!w[pwlen] || isspace(w[pwlen]))
return TRUE;
}
while (*w && !isspace(*w))
w++;
}
return FALSE;
}
static struct passwd *
get_unix_pw()
{
char *user;
unsigned uid;
static struct passwd *pw = (struct passwd *) 0;
if (pw)
return pw; /* cache answer */
uid = (unsigned) getuid();
user = getlogin();
if (user) {
pw = getpwnam(user);
if (pw && ((unsigned) pw->pw_uid != uid))
pw = 0;
}
if (pw == 0) {
user = nh_getenv("USER");
if (user) {
pw = getpwnam(user);
if (pw && ((unsigned) pw->pw_uid != uid))
pw = 0;
}
if (pw == 0) {
pw = getpwuid(uid);
}
}
return pw;
}
char *
get_login_name()
{
static char buf[BUFSZ];
struct passwd *pw = get_unix_pw();
buf[0] = '\0';
if (pw)
(void)strcpy(buf, pw->pw_name);
return buf;
}
#ifdef __APPLE__
extern int errno;
void
port_insert_pastebuf(buf)
char *buf;
{
/* This should be replaced when there is a Cocoa port. */
const char *errfmt;
size_t len;
FILE *PB = popen("/usr/bin/pbcopy", "w");
if (!PB) {
errfmt = "Unable to start pbcopy (%d)\n";
goto error;
}
len = strlen(buf);
/* Remove the trailing \n, carefully. */
if (buf[len - 1] == '\n')
len--;
/* XXX Sorry, I'm too lazy to write a loop for output this short. */
if (len != fwrite(buf, 1, len, PB)) {
errfmt = "Error sending data to pbcopy (%d)\n";
goto error;
}
if (pclose(PB) != -1) {
return;
}
errfmt = "Error finishing pbcopy (%d)\n";
error:
raw_printf(errfmt, strerror(errno));
}
#endif /* __APPLE__ */
unsigned long
sys_random_seed()
{
unsigned long seed = 0L;
unsigned long pid = (unsigned long) getpid();
boolean no_seed = TRUE;
#ifdef DEV_RANDOM
FILE *fptr;
fptr = fopen(DEV_RANDOM, "r");
if (fptr) {
fread(&seed, sizeof (long), 1, fptr);
has_strong_rngseed = TRUE; /* decl.c */
no_seed = FALSE;
(void) fclose(fptr);
} else {
/* leaves clue, doesn't exit */
paniclog("sys_random_seed", "falling back to weak seed");
}
#endif
if (no_seed) {
seed = (unsigned long) getnow(); /* time((TIME_type) 0) */
/* Quick dirty band-aid to prevent PRNG prediction */
if (pid) {
if (!(pid & 3L))
pid -= 1L;
seed *= pid;
}
}
return seed;
}
/*unixmain.c*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4523_3 |
crossvul-cpp_data_bad_4523_1 | /* NetHack 3.6 topten.c $NHDT-Date: 1450451497 2015/12/18 15:11:37 $ $NHDT-Branch: NetHack-3.6.0 $:$NHDT-Revision: 1.44 $ */
/* Copyright (c) Stichting Mathematisch Centrum, Amsterdam, 1985. */
/*-Copyright (c) Robert Patrick Rankin, 2012. */
/* NetHack may be freely redistributed. See license for details. */
#include "hack.h"
#include "dlb.h"
#ifdef SHORT_FILENAMES
#include "patchlev.h"
#else
#include "patchlevel.h"
#endif
#ifdef VMS
/* We don't want to rewrite the whole file, because that entails
creating a new version which requires that the old one be deletable. */
#define UPDATE_RECORD_IN_PLACE
#endif
/*
* Updating in place can leave junk at the end of the file in some
* circumstances (if it shrinks and the O.S. doesn't have a straightforward
* way to truncate it). The trailing junk is harmless and the code
* which reads the scores will ignore it.
*/
#ifdef UPDATE_RECORD_IN_PLACE
static long final_fpos;
#endif
#define done_stopprint program_state.stopprint
#define newttentry() (struct toptenentry *) alloc(sizeof (struct toptenentry))
#define dealloc_ttentry(ttent) free((genericptr_t) (ttent))
#ifndef NAMSZ
/* Changing NAMSZ can break your existing record/logfile */
#define NAMSZ 10
#endif
#define DTHSZ 100
#define ROLESZ 3
struct toptenentry {
struct toptenentry *tt_next;
#ifdef UPDATE_RECORD_IN_PLACE
long fpos;
#endif
long points;
int deathdnum, deathlev;
int maxlvl, hp, maxhp, deaths;
int ver_major, ver_minor, patchlevel;
long deathdate, birthdate;
int uid;
char plrole[ROLESZ + 1];
char plrace[ROLESZ + 1];
char plgend[ROLESZ + 1];
char plalign[ROLESZ + 1];
char name[NAMSZ + 1];
char death[DTHSZ + 1];
} * tt_head;
/* size big enough to read in all the string fields at once; includes
room for separating space or trailing newline plus string terminator */
#define SCANBUFSZ (4 * (ROLESZ + 1) + (NAMSZ + 1) + (DTHSZ + 1) + 1)
STATIC_DCL void FDECL(topten_print, (const char *));
STATIC_DCL void FDECL(topten_print_bold, (const char *));
STATIC_DCL void NDECL(outheader);
STATIC_DCL void FDECL(outentry, (int, struct toptenentry *, BOOLEAN_P));
STATIC_DCL void FDECL(discardexcess, (FILE *));
STATIC_DCL void FDECL(readentry, (FILE *, struct toptenentry *));
STATIC_DCL void FDECL(writeentry, (FILE *, struct toptenentry *));
#ifdef XLOGFILE
STATIC_DCL void FDECL(writexlentry, (FILE *, struct toptenentry *, int));
STATIC_DCL long NDECL(encodexlogflags);
STATIC_DCL long NDECL(encodeconduct);
STATIC_DCL long NDECL(encodeachieve);
#endif
STATIC_DCL void FDECL(free_ttlist, (struct toptenentry *));
STATIC_DCL int FDECL(classmon, (char *, BOOLEAN_P));
STATIC_DCL int FDECL(score_wanted, (BOOLEAN_P, int, struct toptenentry *, int,
const char **, int));
#ifdef NO_SCAN_BRACK
STATIC_DCL void FDECL(nsb_mung_line, (char *));
STATIC_DCL void FDECL(nsb_unmung_line, (char *));
#endif
static winid toptenwin = WIN_ERR;
/* "killed by",&c ["an"] 'killer.name' */
void
formatkiller(buf, siz, how, incl_helpless)
char *buf;
unsigned siz;
int how;
boolean incl_helpless;
{
static NEARDATA const char *const killed_by_prefix[] = {
/* DIED, CHOKING, POISONING, STARVING, */
"killed by ", "choked on ", "poisoned by ", "died of ",
/* DROWNING, BURNING, DISSOLVED, CRUSHING, */
"drowned in ", "burned by ", "dissolved in ", "crushed to death by ",
/* STONING, TURNED_SLIME, GENOCIDED, */
"petrified by ", "turned to slime by ", "killed by ",
/* PANICKED, TRICKED, QUIT, ESCAPED, ASCENDED */
"", "", "", "", ""
};
unsigned l;
char c, *kname = killer.name;
buf[0] = '\0'; /* lint suppression */
switch (killer.format) {
default:
impossible("bad killer format? (%d)", killer.format);
/*FALLTHRU*/
case NO_KILLER_PREFIX:
break;
case KILLED_BY_AN:
kname = an(kname);
/*FALLTHRU*/
case KILLED_BY:
(void) strncat(buf, killed_by_prefix[how], siz - 1);
l = strlen(buf);
buf += l, siz -= l;
break;
}
/* Copy kname into buf[].
* Object names and named fruit have already been sanitized, but
* monsters can have "called 'arbitrary text'" attached to them,
* so make sure that that text can't confuse field splitting when
* record, logfile, or xlogfile is re-read at some later point.
*/
while (--siz > 0) {
c = *kname++;
if (!c)
break;
else if (c == ',')
c = ';';
/* 'xlogfile' doesn't really need protection for '=', but
fixrecord.awk for corrupted 3.6.0 'record' does (only
if using xlogfile rather than logfile to repair record) */
else if (c == '=')
c = '_';
/* tab is not possible due to use of mungspaces() when naming;
it would disrupt xlogfile parsing if it were present */
else if (c == '\t')
c = ' ';
*buf++ = c;
}
*buf = '\0';
if (incl_helpless && multi) {
/* X <= siz: 'sizeof "string"' includes 1 for '\0' terminator */
if (multi_reason && strlen(multi_reason) + sizeof ", while " <= siz)
Sprintf(buf, ", while %s", multi_reason);
/* either multi_reason wasn't specified or wouldn't fit */
else if (sizeof ", while helpless" <= siz)
Strcpy(buf, ", while helpless");
/* else extra death info won't fit, so leave it out */
}
}
STATIC_OVL void
topten_print(x)
const char *x;
{
if (toptenwin == WIN_ERR)
raw_print(x);
else
putstr(toptenwin, ATR_NONE, x);
}
STATIC_OVL void
topten_print_bold(x)
const char *x;
{
if (toptenwin == WIN_ERR)
raw_print_bold(x);
else
putstr(toptenwin, ATR_BOLD, x);
}
int
observable_depth(lev)
d_level *lev;
{
#if 0
/* if we ever randomize the order of the elemental planes, we
must use a constant external representation in the record file */
if (In_endgame(lev)) {
if (Is_astralevel(lev))
return -5;
else if (Is_waterlevel(lev))
return -4;
else if (Is_firelevel(lev))
return -3;
else if (Is_airlevel(lev))
return -2;
else if (Is_earthlevel(lev))
return -1;
else
return 0; /* ? */
} else
#endif
return depth(lev);
}
/* throw away characters until current record has been entirely consumed */
STATIC_OVL void
discardexcess(rfile)
FILE *rfile;
{
int c;
do {
c = fgetc(rfile);
} while (c != '\n' && c != EOF);
}
STATIC_OVL void
readentry(rfile, tt)
FILE *rfile;
struct toptenentry *tt;
{
char inbuf[SCANBUFSZ], s1[SCANBUFSZ], s2[SCANBUFSZ], s3[SCANBUFSZ],
s4[SCANBUFSZ], s5[SCANBUFSZ], s6[SCANBUFSZ];
#ifdef NO_SCAN_BRACK /* Version_ Pts DgnLevs_ Hp___ Died__Born id */
static const char fmt[] = "%d %d %d %ld %d %d %d %d %d %d %ld %ld %d%*c";
static const char fmt32[] = "%c%c %s %s%*c";
static const char fmt33[] = "%s %s %s %s %s %s%*c";
#else
static const char fmt[] = "%d.%d.%d %ld %d %d %d %d %d %d %ld %ld %d ";
static const char fmt32[] = "%c%c %[^,],%[^\n]%*c";
static const char fmt33[] = "%s %s %s %s %[^,],%[^\n]%*c";
#endif
#ifdef UPDATE_RECORD_IN_PLACE
/* note: input below must read the record's terminating newline */
final_fpos = tt->fpos = ftell(rfile);
#endif
#define TTFIELDS 13
if (fscanf(rfile, fmt, &tt->ver_major, &tt->ver_minor, &tt->patchlevel,
&tt->points, &tt->deathdnum, &tt->deathlev, &tt->maxlvl,
&tt->hp, &tt->maxhp, &tt->deaths, &tt->deathdate,
&tt->birthdate, &tt->uid) != TTFIELDS) {
#undef TTFIELDS
tt->points = 0;
discardexcess(rfile);
} else {
/* load remainder of record into a local buffer;
this imposes an implicit length limit of SCANBUFSZ
on every string field extracted from the buffer */
if (!fgets(inbuf, sizeof inbuf, rfile)) {
/* sscanf will fail and tt->points will be set to 0 */
*inbuf = '\0';
} else if (!index(inbuf, '\n')) {
Strcpy(&inbuf[sizeof inbuf - 2], "\n");
discardexcess(rfile);
}
/* Check for backwards compatibility */
if (tt->ver_major < 3 || (tt->ver_major == 3 && tt->ver_minor < 3)) {
int i;
if (sscanf(inbuf, fmt32, tt->plrole, tt->plgend, s1, s2) == 4) {
tt->plrole[1] = tt->plgend[1] = '\0'; /* read via %c */
copynchars(tt->name, s1, (int) (sizeof tt->name) - 1);
copynchars(tt->death, s2, (int) (sizeof tt->death) - 1);
} else
tt->points = 0;
tt->plrole[1] = '\0';
if ((i = str2role(tt->plrole)) >= 0)
Strcpy(tt->plrole, roles[i].filecode);
Strcpy(tt->plrace, "?");
Strcpy(tt->plgend, (tt->plgend[0] == 'M') ? "Mal" : "Fem");
Strcpy(tt->plalign, "?");
} else if (sscanf(inbuf, fmt33, s1, s2, s3, s4, s5, s6) == 6) {
copynchars(tt->plrole, s1, (int) (sizeof tt->plrole) - 1);
copynchars(tt->plrace, s2, (int) (sizeof tt->plrace) - 1);
copynchars(tt->plgend, s3, (int) (sizeof tt->plgend) - 1);
copynchars(tt->plalign, s4, (int) (sizeof tt->plalign) - 1);
copynchars(tt->name, s5, (int) (sizeof tt->name) - 1);
copynchars(tt->death, s6, (int) (sizeof tt->death) - 1);
} else
tt->points = 0;
#ifdef NO_SCAN_BRACK
if (tt->points > 0) {
nsb_unmung_line(tt->name);
nsb_unmung_line(tt->death);
}
#endif
}
/* check old score entries for Y2K problem and fix whenever found */
if (tt->points > 0) {
if (tt->birthdate < 19000000L)
tt->birthdate += 19000000L;
if (tt->deathdate < 19000000L)
tt->deathdate += 19000000L;
}
}
STATIC_OVL void
writeentry(rfile, tt)
FILE *rfile;
struct toptenentry *tt;
{
static const char fmt32[] = "%c%c "; /* role,gender */
static const char fmt33[] = "%s %s %s %s "; /* role,race,gndr,algn */
#ifndef NO_SCAN_BRACK
static const char fmt0[] = "%d.%d.%d %ld %d %d %d %d %d %d %ld %ld %d ";
static const char fmtX[] = "%s,%s\n";
#else /* NO_SCAN_BRACK */
static const char fmt0[] = "%d %d %d %ld %d %d %d %d %d %d %ld %ld %d ";
static const char fmtX[] = "%s %s\n";
nsb_mung_line(tt->name);
nsb_mung_line(tt->death);
#endif
(void) fprintf(rfile, fmt0, tt->ver_major, tt->ver_minor, tt->patchlevel,
tt->points, tt->deathdnum, tt->deathlev, tt->maxlvl,
tt->hp, tt->maxhp, tt->deaths, tt->deathdate,
tt->birthdate, tt->uid);
if (tt->ver_major < 3 || (tt->ver_major == 3 && tt->ver_minor < 3))
(void) fprintf(rfile, fmt32, tt->plrole[0], tt->plgend[0]);
else
(void) fprintf(rfile, fmt33, tt->plrole, tt->plrace, tt->plgend,
tt->plalign);
(void) fprintf(rfile, fmtX, onlyspace(tt->name) ? "_" : tt->name,
tt->death);
#ifdef NO_SCAN_BRACK
nsb_unmung_line(tt->name);
nsb_unmung_line(tt->death);
#endif
}
#ifdef XLOGFILE
/* as tab is never used in eg. plname or death, no need to mangle those. */
STATIC_OVL void
writexlentry(rfile, tt, how)
FILE *rfile;
struct toptenentry *tt;
int how;
{
#define Fprintf (void) fprintf
#define XLOG_SEP '\t' /* xlogfile field separator. */
char buf[BUFSZ], tmpbuf[DTHSZ + 1];
Sprintf(buf, "version=%d.%d.%d", tt->ver_major, tt->ver_minor,
tt->patchlevel);
Sprintf(eos(buf), "%cpoints=%ld%cdeathdnum=%d%cdeathlev=%d", XLOG_SEP,
tt->points, XLOG_SEP, tt->deathdnum, XLOG_SEP, tt->deathlev);
Sprintf(eos(buf), "%cmaxlvl=%d%chp=%d%cmaxhp=%d", XLOG_SEP, tt->maxlvl,
XLOG_SEP, tt->hp, XLOG_SEP, tt->maxhp);
Sprintf(eos(buf), "%cdeaths=%d%cdeathdate=%ld%cbirthdate=%ld%cuid=%d",
XLOG_SEP, tt->deaths, XLOG_SEP, tt->deathdate, XLOG_SEP,
tt->birthdate, XLOG_SEP, tt->uid);
Fprintf(rfile, "%s", buf);
Sprintf(buf, "%crole=%s%crace=%s%cgender=%s%calign=%s", XLOG_SEP,
tt->plrole, XLOG_SEP, tt->plrace, XLOG_SEP, tt->plgend, XLOG_SEP,
tt->plalign);
/* make a copy of death reason that doesn't include ", while helpless" */
formatkiller(tmpbuf, sizeof tmpbuf, how, FALSE);
Fprintf(rfile, "%s%cname=%s%cdeath=%s",
buf, /* (already includes separator) */
XLOG_SEP, plname, XLOG_SEP, tmpbuf);
if (multi)
Fprintf(rfile, "%cwhile=%s", XLOG_SEP,
multi_reason ? multi_reason : "helpless");
Fprintf(rfile, "%cconduct=0x%lx%cturns=%ld%cachieve=0x%lx", XLOG_SEP,
encodeconduct(), XLOG_SEP, moves, XLOG_SEP, encodeachieve());
Fprintf(rfile, "%crealtime=%ld%cstarttime=%ld%cendtime=%ld", XLOG_SEP,
(long) urealtime.realtime, XLOG_SEP,
(long) ubirthday, XLOG_SEP, (long) urealtime.finish_time);
Fprintf(rfile, "%cgender0=%s%calign0=%s", XLOG_SEP,
genders[flags.initgend].filecode, XLOG_SEP,
aligns[1 - u.ualignbase[A_ORIGINAL]].filecode);
Fprintf(rfile, "%cflags=0x%lx", XLOG_SEP, encodexlogflags());
Fprintf(rfile, "\n");
#undef XLOG_SEP
}
STATIC_OVL long
encodexlogflags()
{
long e = 0L;
if (wizard)
e |= 1L << 0;
if (discover)
e |= 1L << 1;
if (!u.uroleplay.numbones)
e |= 1L << 2;
return e;
}
STATIC_OVL long
encodeconduct()
{
long e = 0L;
if (!u.uconduct.food)
e |= 1L << 0;
if (!u.uconduct.unvegan)
e |= 1L << 1;
if (!u.uconduct.unvegetarian)
e |= 1L << 2;
if (!u.uconduct.gnostic)
e |= 1L << 3;
if (!u.uconduct.weaphit)
e |= 1L << 4;
if (!u.uconduct.killer)
e |= 1L << 5;
if (!u.uconduct.literate)
e |= 1L << 6;
if (!u.uconduct.polypiles)
e |= 1L << 7;
if (!u.uconduct.polyselfs)
e |= 1L << 8;
if (!u.uconduct.wishes)
e |= 1L << 9;
if (!u.uconduct.wisharti)
e |= 1L << 10;
if (!num_genocides())
e |= 1L << 11;
return e;
}
STATIC_OVL long
encodeachieve()
{
long r = 0L;
if (u.uachieve.bell)
r |= 1L << 0;
if (u.uachieve.enter_gehennom)
r |= 1L << 1;
if (u.uachieve.menorah)
r |= 1L << 2;
if (u.uachieve.book)
r |= 1L << 3;
if (u.uevent.invoked)
r |= 1L << 4;
if (u.uachieve.amulet)
r |= 1L << 5;
if (In_endgame(&u.uz))
r |= 1L << 6;
if (Is_astralevel(&u.uz))
r |= 1L << 7;
if (u.uachieve.ascended)
r |= 1L << 8;
if (u.uachieve.mines_luckstone)
r |= 1L << 9;
if (u.uachieve.finish_sokoban)
r |= 1L << 10;
if (u.uachieve.killed_medusa)
r |= 1L << 11;
if (u.uroleplay.blind)
r |= 1L << 12;
if (u.uroleplay.nudist)
r |= 1L << 13;
return r;
}
#endif /* XLOGFILE */
STATIC_OVL void
free_ttlist(tt)
struct toptenentry *tt;
{
struct toptenentry *ttnext;
while (tt->points > 0) {
ttnext = tt->tt_next;
dealloc_ttentry(tt);
tt = ttnext;
}
dealloc_ttentry(tt);
}
void
topten(how, when)
int how;
time_t when;
{
int uid = getuid();
int rank, rank0 = -1, rank1 = 0;
int occ_cnt = sysopt.persmax;
register struct toptenentry *t0, *tprev;
struct toptenentry *t1;
FILE *rfile;
register int flg = 0;
boolean t0_used;
#ifdef LOGFILE
FILE *lfile;
#endif /* LOGFILE */
#ifdef XLOGFILE
FILE *xlfile;
#endif /* XLOGFILE */
#ifdef _DCC
/* Under DICE 3.0, this crashes the system consistently, apparently due to
* corruption of *rfile somewhere. Until I figure this out, just cut out
* topten support entirely - at least then the game exits cleanly. --AC
*/
return;
#endif
/* If we are in the midst of a panic, cut out topten entirely.
* topten uses alloc() several times, which will lead to
* problems if the panic was the result of an alloc() failure.
*/
if (program_state.panicking)
return;
if (iflags.toptenwin) {
toptenwin = create_nhwindow(NHW_TEXT);
}
#if defined(UNIX) || defined(VMS) || defined(__EMX__)
#define HUP if (!program_state.done_hup)
#else
#define HUP
#endif
#ifdef TOS
restore_colors(); /* make sure the screen is black on white */
#endif
/* create a new 'topten' entry */
t0_used = FALSE;
t0 = newttentry();
t0->ver_major = VERSION_MAJOR;
t0->ver_minor = VERSION_MINOR;
t0->patchlevel = PATCHLEVEL;
t0->points = u.urexp;
t0->deathdnum = u.uz.dnum;
/* deepest_lev_reached() is in terms of depth(), and reporting the
* deepest level reached in the dungeon death occurred in doesn't
* seem right, so we have to report the death level in depth() terms
* as well (which also seems reasonable since that's all the player
* sees on the screen anyway)
*/
t0->deathlev = observable_depth(&u.uz);
t0->maxlvl = deepest_lev_reached(TRUE);
t0->hp = u.uhp;
t0->maxhp = u.uhpmax;
t0->deaths = u.umortality;
t0->uid = uid;
copynchars(t0->plrole, urole.filecode, ROLESZ);
copynchars(t0->plrace, urace.filecode, ROLESZ);
copynchars(t0->plgend, genders[flags.female].filecode, ROLESZ);
copynchars(t0->plalign, aligns[1 - u.ualign.type].filecode, ROLESZ);
copynchars(t0->name, plname, NAMSZ);
formatkiller(t0->death, sizeof t0->death, how, TRUE);
t0->birthdate = yyyymmdd(ubirthday);
t0->deathdate = yyyymmdd(when);
t0->tt_next = 0;
#ifdef UPDATE_RECORD_IN_PLACE
t0->fpos = -1L;
#endif
#ifdef LOGFILE /* used for debugging (who dies of what, where) */
if (lock_file(LOGFILE, SCOREPREFIX, 10)) {
if (!(lfile = fopen_datafile(LOGFILE, "a", SCOREPREFIX))) {
HUP raw_print("Cannot open log file!");
} else {
writeentry(lfile, t0);
(void) fclose(lfile);
}
unlock_file(LOGFILE);
}
#endif /* LOGFILE */
#ifdef XLOGFILE
if (lock_file(XLOGFILE, SCOREPREFIX, 10)) {
if (!(xlfile = fopen_datafile(XLOGFILE, "a", SCOREPREFIX))) {
HUP raw_print("Cannot open extended log file!");
} else {
writexlentry(xlfile, t0, how);
(void) fclose(xlfile);
}
unlock_file(XLOGFILE);
}
#endif /* XLOGFILE */
if (wizard || discover) {
if (how != PANICKED)
HUP {
char pbuf[BUFSZ];
topten_print("");
Sprintf(pbuf,
"Since you were in %s mode, the score list will not be checked.",
wizard ? "wizard" : "discover");
topten_print(pbuf);
}
goto showwin;
}
if (!lock_file(RECORD, SCOREPREFIX, 60))
goto destroywin;
#ifdef UPDATE_RECORD_IN_PLACE
rfile = fopen_datafile(RECORD, "r+", SCOREPREFIX);
#else
rfile = fopen_datafile(RECORD, "r", SCOREPREFIX);
#endif
if (!rfile) {
HUP raw_print("Cannot open record file!");
unlock_file(RECORD);
goto destroywin;
}
HUP topten_print("");
/* assure minimum number of points */
if (t0->points < sysopt.pointsmin)
t0->points = 0;
t1 = tt_head = newttentry();
tprev = 0;
/* rank0: -1 undefined, 0 not_on_list, n n_th on list */
for (rank = 1;;) {
readentry(rfile, t1);
if (t1->points < sysopt.pointsmin)
t1->points = 0;
if (rank0 < 0 && t1->points < t0->points) {
rank0 = rank++;
if (tprev == 0)
tt_head = t0;
else
tprev->tt_next = t0;
t0->tt_next = t1;
#ifdef UPDATE_RECORD_IN_PLACE
t0->fpos = t1->fpos; /* insert here */
#endif
t0_used = TRUE;
occ_cnt--;
flg++; /* ask for a rewrite */
} else
tprev = t1;
if (t1->points == 0)
break;
if ((sysopt.pers_is_uid ? t1->uid == t0->uid
: strncmp(t1->name, t0->name, NAMSZ) == 0)
&& !strncmp(t1->plrole, t0->plrole, ROLESZ) && --occ_cnt <= 0) {
if (rank0 < 0) {
rank0 = 0;
rank1 = rank;
HUP {
char pbuf[BUFSZ];
Sprintf(pbuf,
"You didn't beat your previous score of %ld points.",
t1->points);
topten_print(pbuf);
topten_print("");
}
}
if (occ_cnt < 0) {
flg++;
continue;
}
}
if (rank <= sysopt.entrymax) {
t1->tt_next = newttentry();
t1 = t1->tt_next;
rank++;
}
if (rank > sysopt.entrymax) {
t1->points = 0;
break;
}
}
if (flg) { /* rewrite record file */
#ifdef UPDATE_RECORD_IN_PLACE
(void) fseek(rfile, (t0->fpos >= 0 ? t0->fpos : final_fpos),
SEEK_SET);
#else
(void) fclose(rfile);
if (!(rfile = fopen_datafile(RECORD, "w", SCOREPREFIX))) {
HUP raw_print("Cannot write record file");
unlock_file(RECORD);
free_ttlist(tt_head);
goto destroywin;
}
#endif /* UPDATE_RECORD_IN_PLACE */
if (!done_stopprint)
if (rank0 > 0) {
if (rank0 <= 10) {
topten_print("You made the top ten list!");
} else {
char pbuf[BUFSZ];
Sprintf(pbuf,
"You reached the %d%s place on the top %d list.",
rank0, ordin(rank0), sysopt.entrymax);
topten_print(pbuf);
}
topten_print("");
}
}
if (rank0 == 0)
rank0 = rank1;
if (rank0 <= 0)
rank0 = rank;
if (!done_stopprint)
outheader();
t1 = tt_head;
for (rank = 1; t1->points != 0; rank++, t1 = t1->tt_next) {
if (flg
#ifdef UPDATE_RECORD_IN_PLACE
&& rank >= rank0
#endif
)
writeentry(rfile, t1);
if (done_stopprint)
continue;
if (rank > flags.end_top && (rank < rank0 - flags.end_around
|| rank > rank0 + flags.end_around)
&& (!flags.end_own
|| (sysopt.pers_is_uid
? t1->uid == t0->uid
: strncmp(t1->name, t0->name, NAMSZ) == 0)))
continue;
if (rank == rank0 - flags.end_around
&& rank0 > flags.end_top + flags.end_around + 1 && !flags.end_own)
topten_print("");
if (rank != rank0)
outentry(rank, t1, FALSE);
else if (!rank1)
outentry(rank, t1, TRUE);
else {
outentry(rank, t1, TRUE);
outentry(0, t0, TRUE);
}
}
if (rank0 >= rank)
if (!done_stopprint)
outentry(0, t0, TRUE);
#ifdef UPDATE_RECORD_IN_PLACE
if (flg) {
#ifdef TRUNCATE_FILE
/* if a reasonable way to truncate a file exists, use it */
truncate_file(rfile);
#else
/* use sentinel record rather than relying on truncation */
t1->points = 0L; /* terminates file when read back in */
t1->ver_major = t1->ver_minor = t1->patchlevel = 0;
t1->uid = t1->deathdnum = t1->deathlev = 0;
t1->maxlvl = t1->hp = t1->maxhp = t1->deaths = 0;
t1->plrole[0] = t1->plrace[0] = t1->plgend[0] = t1->plalign[0] = '-';
t1->plrole[1] = t1->plrace[1] = t1->plgend[1] = t1->plalign[1] = 0;
t1->birthdate = t1->deathdate = yyyymmdd((time_t) 0L);
Strcpy(t1->name, "@");
Strcpy(t1->death, "<eod>\n");
writeentry(rfile, t1);
(void) fflush(rfile);
#endif /* TRUNCATE_FILE */
}
#endif /* UPDATE_RECORD_IN_PLACE */
(void) fclose(rfile);
unlock_file(RECORD);
free_ttlist(tt_head);
showwin:
if (iflags.toptenwin && !done_stopprint)
display_nhwindow(toptenwin, 1);
destroywin:
if (!t0_used)
dealloc_ttentry(t0);
if (iflags.toptenwin) {
destroy_nhwindow(toptenwin);
toptenwin = WIN_ERR;
}
}
STATIC_OVL void
outheader()
{
char linebuf[BUFSZ];
register char *bp;
Strcpy(linebuf, " No Points Name");
bp = eos(linebuf);
while (bp < linebuf + COLNO - 9)
*bp++ = ' ';
Strcpy(bp, "Hp [max]");
topten_print(linebuf);
}
/* so>0: standout line; so=0: ordinary line */
STATIC_OVL void
outentry(rank, t1, so)
struct toptenentry *t1;
int rank;
boolean so;
{
boolean second_line = TRUE;
char linebuf[BUFSZ];
char *bp, hpbuf[24], linebuf3[BUFSZ];
int hppos, lngr;
linebuf[0] = '\0';
if (rank)
Sprintf(eos(linebuf), "%3d", rank);
else
Strcat(linebuf, " ");
Sprintf(eos(linebuf), " %10ld %.10s", t1->points ? t1->points : u.urexp,
t1->name);
Sprintf(eos(linebuf), "-%s", t1->plrole);
if (t1->plrace[0] != '?')
Sprintf(eos(linebuf), "-%s", t1->plrace);
/* Printing of gender and alignment is intentional. It has been
* part of the NetHack Geek Code, and illustrates a proper way to
* specify a character from the command line.
*/
Sprintf(eos(linebuf), "-%s", t1->plgend);
if (t1->plalign[0] != '?')
Sprintf(eos(linebuf), "-%s ", t1->plalign);
else
Strcat(linebuf, " ");
if (!strncmp("escaped", t1->death, 7)) {
Sprintf(eos(linebuf), "escaped the dungeon %s[max level %d]",
!strncmp(" (", t1->death + 7, 2) ? t1->death + 7 + 2 : "",
t1->maxlvl);
/* fixup for closing paren in "escaped... with...Amulet)[max..." */
if ((bp = index(linebuf, ')')) != 0)
*bp = (t1->deathdnum == astral_level.dnum) ? '\0' : ' ';
second_line = FALSE;
} else if (!strncmp("ascended", t1->death, 8)) {
Sprintf(eos(linebuf), "ascended to demigod%s-hood",
(t1->plgend[0] == 'F') ? "dess" : "");
second_line = FALSE;
} else {
if (!strncmp(t1->death, "quit", 4)) {
Strcat(linebuf, "quit");
second_line = FALSE;
} else if (!strncmp(t1->death, "died of st", 10)) {
Strcat(linebuf, "starved to death");
second_line = FALSE;
} else if (!strncmp(t1->death, "choked", 6)) {
Sprintf(eos(linebuf), "choked on h%s food",
(t1->plgend[0] == 'F') ? "er" : "is");
} else if (!strncmp(t1->death, "poisoned", 8)) {
Strcat(linebuf, "was poisoned");
} else if (!strncmp(t1->death, "crushed", 7)) {
Strcat(linebuf, "was crushed to death");
} else if (!strncmp(t1->death, "petrified by ", 13)) {
Strcat(linebuf, "turned to stone");
} else
Strcat(linebuf, "died");
if (t1->deathdnum == astral_level.dnum) {
const char *arg, *fmt = " on the Plane of %s";
switch (t1->deathlev) {
case -5:
fmt = " on the %s Plane";
arg = "Astral";
break;
case -4:
arg = "Water";
break;
case -3:
arg = "Fire";
break;
case -2:
arg = "Air";
break;
case -1:
arg = "Earth";
break;
default:
arg = "Void";
break;
}
Sprintf(eos(linebuf), fmt, arg);
} else {
Sprintf(eos(linebuf), " in %s", dungeons[t1->deathdnum].dname);
if (t1->deathdnum != knox_level.dnum)
Sprintf(eos(linebuf), " on level %d", t1->deathlev);
if (t1->deathlev != t1->maxlvl)
Sprintf(eos(linebuf), " [max %d]", t1->maxlvl);
}
/* kludge for "quit while already on Charon's boat" */
if (!strncmp(t1->death, "quit ", 5))
Strcat(linebuf, t1->death + 4);
}
Strcat(linebuf, ".");
/* Quit, starved, ascended, and escaped contain no second line */
if (second_line)
Sprintf(eos(linebuf), " %c%s.", highc(*(t1->death)), t1->death + 1);
lngr = (int) strlen(linebuf);
if (t1->hp <= 0)
hpbuf[0] = '-', hpbuf[1] = '\0';
else
Sprintf(hpbuf, "%d", t1->hp);
/* beginning of hp column after padding (not actually padded yet) */
hppos = COLNO - (sizeof(" Hp [max]") - 1); /* sizeof(str) includes \0 */
while (lngr >= hppos) {
for (bp = eos(linebuf); !(*bp == ' ' && (bp - linebuf < hppos)); bp--)
;
/* special case: word is too long, wrap in the middle */
if (linebuf + 15 >= bp)
bp = linebuf + hppos - 1;
/* special case: if about to wrap in the middle of maximum
dungeon depth reached, wrap in front of it instead */
if (bp > linebuf + 5 && !strncmp(bp - 5, " [max", 5))
bp -= 5;
if (*bp != ' ')
Strcpy(linebuf3, bp);
else
Strcpy(linebuf3, bp + 1);
*bp = 0;
if (so) {
while (bp < linebuf + (COLNO - 1))
*bp++ = ' ';
*bp = 0;
topten_print_bold(linebuf);
} else
topten_print(linebuf);
Sprintf(linebuf, "%15s %s", "", linebuf3);
lngr = strlen(linebuf);
}
/* beginning of hp column not including padding */
hppos = COLNO - 7 - (int) strlen(hpbuf);
bp = eos(linebuf);
if (bp <= linebuf + hppos) {
/* pad any necessary blanks to the hit point entry */
while (bp < linebuf + hppos)
*bp++ = ' ';
Strcpy(bp, hpbuf);
Sprintf(eos(bp), " %s[%d]",
(t1->maxhp < 10) ? " " : (t1->maxhp < 100) ? " " : "",
t1->maxhp);
}
if (so) {
bp = eos(linebuf);
if (so >= COLNO)
so = COLNO - 1;
while (bp < linebuf + so)
*bp++ = ' ';
*bp = 0;
topten_print_bold(linebuf);
} else
topten_print(linebuf);
}
STATIC_OVL int
score_wanted(current_ver, rank, t1, playerct, players, uid)
boolean current_ver;
int rank;
struct toptenentry *t1;
int playerct;
const char **players;
int uid;
{
int i;
if (current_ver
&& (t1->ver_major != VERSION_MAJOR || t1->ver_minor != VERSION_MINOR
|| t1->patchlevel != PATCHLEVEL))
return 0;
if (sysopt.pers_is_uid && !playerct && t1->uid == uid)
return 1;
for (i = 0; i < playerct; i++) {
if (players[i][0] == '-' && index("pr", players[i][1])
&& players[i][2] == 0 && i + 1 < playerct) {
const char *arg = players[i + 1];
if ((players[i][1] == 'p'
&& str2role(arg) == str2role(t1->plrole))
|| (players[i][1] == 'r'
&& str2race(arg) == str2race(t1->plrace)))
return 1;
i++;
} else if (strcmp(players[i], "all") == 0
|| strncmp(t1->name, players[i], NAMSZ) == 0
|| (players[i][0] == '-' && players[i][1] == t1->plrole[0]
&& players[i][2] == 0)
|| (digit(players[i][0]) && rank <= atoi(players[i])))
return 1;
}
return 0;
}
/*
* print selected parts of score list.
* argc >= 2, with argv[0] untrustworthy (directory names, et al.),
* and argv[1] starting with "-s".
*/
void
prscore(argc, argv)
int argc;
char **argv;
{
const char **players;
int playerct, rank;
boolean current_ver = TRUE, init_done = FALSE;
register struct toptenentry *t1;
FILE *rfile;
boolean match_found = FALSE;
register int i;
char pbuf[BUFSZ];
int uid = -1;
const char *player0;
if (argc < 2 || strncmp(argv[1], "-s", 2)) {
raw_printf("prscore: bad arguments (%d)", argc);
return;
}
rfile = fopen_datafile(RECORD, "r", SCOREPREFIX);
if (!rfile) {
raw_print("Cannot open record file!");
return;
}
#ifdef AMIGA
{
extern winid amii_rawprwin;
init_nhwindows(&argc, argv);
amii_rawprwin = create_nhwindow(NHW_TEXT);
}
#endif
/* If the score list isn't after a game, we never went through
* initialization. */
if (wiz1_level.dlevel == 0) {
dlb_init();
init_dungeons();
init_done = TRUE;
}
if (!argv[1][2]) { /* plain "-s" */
argc--;
argv++;
} else
argv[1] += 2;
if (argc > 1 && !strcmp(argv[1], "-v")) {
current_ver = FALSE;
argc--;
argv++;
}
if (argc <= 1) {
if (sysopt.pers_is_uid) {
uid = getuid();
playerct = 0;
players = (const char **) 0;
} else {
player0 = plname;
if (!*player0)
#ifdef AMIGA
player0 = "all"; /* single user system */
#else
player0 = "hackplayer";
#endif
playerct = 1;
players = &player0;
}
} else {
playerct = --argc;
players = (const char **) ++argv;
}
raw_print("");
t1 = tt_head = newttentry();
for (rank = 1;; rank++) {
readentry(rfile, t1);
if (t1->points == 0)
break;
if (!match_found
&& score_wanted(current_ver, rank, t1, playerct, players, uid))
match_found = TRUE;
t1->tt_next = newttentry();
t1 = t1->tt_next;
}
(void) fclose(rfile);
if (init_done) {
free_dungeons();
dlb_cleanup();
}
if (match_found) {
outheader();
t1 = tt_head;
for (rank = 1; t1->points != 0; rank++, t1 = t1->tt_next) {
if (score_wanted(current_ver, rank, t1, playerct, players, uid))
(void) outentry(rank, t1, FALSE);
}
} else {
Sprintf(pbuf, "Cannot find any %sentries for ",
current_ver ? "current " : "");
if (playerct < 1)
Strcat(pbuf, "you.");
else {
if (playerct > 1)
Strcat(pbuf, "any of ");
for (i = 0; i < playerct; i++) {
/* stop printing players if there are too many to fit */
if (strlen(pbuf) + strlen(players[i]) + 2 >= BUFSZ) {
if (strlen(pbuf) < BUFSZ - 4)
Strcat(pbuf, "...");
else
Strcpy(pbuf + strlen(pbuf) - 4, "...");
break;
}
Strcat(pbuf, players[i]);
if (i < playerct - 1) {
if (players[i][0] == '-' && index("pr", players[i][1])
&& players[i][2] == 0)
Strcat(pbuf, " ");
else
Strcat(pbuf, ":");
}
}
}
raw_print(pbuf);
raw_printf("Usage: %s -s [-v] <playertypes> [maxrank] [playernames]",
hname);
raw_printf("Player types are: [-p role] [-r race]");
}
free_ttlist(tt_head);
#ifdef AMIGA
{
extern winid amii_rawprwin;
display_nhwindow(amii_rawprwin, 1);
destroy_nhwindow(amii_rawprwin);
amii_rawprwin = WIN_ERR;
}
#endif
}
STATIC_OVL int
classmon(plch, fem)
char *plch;
boolean fem;
{
int i;
/* Look for this role in the role table */
for (i = 0; roles[i].name.m; i++)
if (!strncmp(plch, roles[i].filecode, ROLESZ)) {
if (fem && roles[i].femalenum != NON_PM)
return roles[i].femalenum;
else if (roles[i].malenum != NON_PM)
return roles[i].malenum;
else
return PM_HUMAN;
}
/* this might be from a 3.2.x score for former Elf class */
if (!strcmp(plch, "E"))
return PM_RANGER;
impossible("What weird role is this? (%s)", plch);
return PM_HUMAN_MUMMY;
}
/*
* Get a random player name and class from the high score list,
*/
struct toptenentry *
get_rnd_toptenentry()
{
int rank, i;
FILE *rfile;
register struct toptenentry *tt;
static struct toptenentry tt_buf;
rfile = fopen_datafile(RECORD, "r", SCOREPREFIX);
if (!rfile) {
impossible("Cannot open record file!");
return NULL;
}
tt = &tt_buf;
rank = rnd(sysopt.tt_oname_maxrank);
pickentry:
for (i = rank; i; i--) {
readentry(rfile, tt);
if (tt->points == 0)
break;
}
if (tt->points == 0) {
if (rank > 1) {
rank = 1;
rewind(rfile);
goto pickentry;
}
tt = NULL;
}
(void) fclose(rfile);
return tt;
}
/*
* Attach random player name and class from high score list
* to an object (for statues or morgue corpses).
*/
struct obj *
tt_oname(otmp)
struct obj *otmp;
{
struct toptenentry *tt;
if (!otmp)
return (struct obj *) 0;
tt = get_rnd_toptenentry();
if (!tt)
return (struct obj *) 0;
set_corpsenm(otmp, classmon(tt->plrole, (tt->plgend[0] == 'F')));
otmp = oname(otmp, tt->name);
return otmp;
}
#ifdef NO_SCAN_BRACK
/* Lattice scanf isn't up to reading the scorefile. What */
/* follows deals with that; I admit it's ugly. (KL) */
/* Now generally available (KL) */
STATIC_OVL void
nsb_mung_line(p)
char *p;
{
while ((p = index(p, ' ')) != 0)
*p = '|';
}
STATIC_OVL void
nsb_unmung_line(p)
char *p;
{
while ((p = index(p, '|')) != 0)
*p = ' ';
}
#endif /* NO_SCAN_BRACK */
/*topten.c*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4523_1 |
crossvul-cpp_data_good_998_0 | /*
* Marvell Wireless LAN device driver: management IE handling- setting and
* deleting IE.
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
/* This function checks if current IE index is used by any on other interface.
* Return: -1: yes, current IE index is used by someone else.
* 0: no, current IE index is NOT used by other interface.
*/
static int
mwifiex_ie_index_used_by_other_intf(struct mwifiex_private *priv, u16 idx)
{
int i;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie *ie;
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i] != priv) {
ie = &adapter->priv[i]->mgmt_ie[idx];
if (ie->mgmt_subtype_mask && ie->ie_length)
return -1;
}
}
return 0;
}
/* Get unused IE index. This index will be used for setting new IE */
static int
mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
struct mwifiex_ie *ie, u16 *index)
{
u16 mask, len, i;
for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
len = le16_to_cpu(ie->ie_length);
if (mask == MWIFIEX_AUTO_IDX_MASK)
continue;
if (mask == subtype_mask) {
if (len > IEEE_MAX_IE_SIZE)
continue;
*index = i;
return 0;
}
if (!priv->mgmt_ie[i].ie_length) {
if (mwifiex_ie_index_used_by_other_intf(priv, i))
continue;
*index = i;
return 0;
}
}
return -1;
}
/* This function prepares IE data buffer for command to be sent to FW */
static int
mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
struct mwifiex_ie_list *ie_list)
{
u16 travel_len, index, mask;
s16 input_len, tlv_len;
struct mwifiex_ie *ie;
u8 *tmp;
input_len = le16_to_cpu(ie_list->len);
travel_len = sizeof(struct mwifiex_ie_types_header);
ie_list->len = 0;
while (input_len >= sizeof(struct mwifiex_ie_types_header)) {
ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
tlv_len = le16_to_cpu(ie->ie_length);
travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE;
if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE)
return -1;
index = le16_to_cpu(ie->ie_index);
mask = le16_to_cpu(ie->mgmt_subtype_mask);
if (index == MWIFIEX_AUTO_IDX_MASK) {
/* automatic addition */
if (mwifiex_ie_get_autoidx(priv, mask, ie, &index))
return -1;
if (index == MWIFIEX_AUTO_IDX_MASK)
return -1;
tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
priv->mgmt_ie[index].ie_length = ie->ie_length;
priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
priv->mgmt_ie[index].mgmt_subtype_mask =
cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(index);
} else {
if (mask != MWIFIEX_DELETE_MASK)
return -1;
/*
* Check if this index is being used on any
* other interface.
*/
if (mwifiex_ie_index_used_by_other_intf(priv, index))
return -1;
ie->ie_length = 0;
memcpy(&priv->mgmt_ie[index], ie,
sizeof(struct mwifiex_ie));
}
le16_unaligned_add_cpu(&ie_list->len,
le16_to_cpu(
priv->mgmt_ie[index].ie_length) +
MWIFIEX_IE_HDR_SIZE);
input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_CUSTOM_IE_I, ie_list, true);
return 0;
}
/* Copy individual custom IEs for beacon, probe response and assoc response
* and prepare single structure for IE setting.
* This function also updates allocated IE indices from driver.
*/
static int
mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
struct mwifiex_ie *beacon_ie, u16 *beacon_idx,
struct mwifiex_ie *pr_ie, u16 *probe_idx,
struct mwifiex_ie *ar_ie, u16 *assoc_idx)
{
struct mwifiex_ie_list *ap_custom_ie;
u8 *pos;
u16 len;
int ret;
ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
if (!ap_custom_ie)
return -ENOMEM;
ap_custom_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
pos = (u8 *)ap_custom_ie->ie_list;
if (beacon_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(beacon_ie->ie_length);
memcpy(pos, beacon_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (pr_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
memcpy(pos, pr_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (ar_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(ar_ie->ie_length);
memcpy(pos, ar_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
pos = (u8 *)(&ap_custom_ie->ie_list[0].ie_index);
if (beacon_ie && *beacon_idx == MWIFIEX_AUTO_IDX_MASK) {
/* save beacon ie index after auto-indexing */
*beacon_idx = le16_to_cpu(ap_custom_ie->ie_list[0].ie_index);
len = sizeof(*beacon_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(beacon_ie->ie_length);
pos += len;
}
if (pr_ie && le16_to_cpu(pr_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK) {
/* save probe resp ie index after auto-indexing */
*probe_idx = *((u16 *)pos);
len = sizeof(*pr_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
pos += len;
}
if (ar_ie && le16_to_cpu(ar_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK)
/* save assoc resp ie index after auto-indexing */
*assoc_idx = *((u16 *)pos);
kfree(ap_custom_ie);
return ret;
}
/* This function checks if the vendor specified IE is present in passed buffer
* and copies it to mwifiex_ie structure.
* Function takes pointer to struct mwifiex_ie pointer as argument.
* If the vendor specified IE is present then memory is allocated for
* mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
* this memory.
*/
static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
struct mwifiex_ie **ie_ptr, u16 mask,
unsigned int oui, u8 oui_type)
{
struct ieee_types_header *vs_ie;
struct mwifiex_ie *ie = *ie_ptr;
const u8 *vendor_ie;
vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
if (vendor_ie) {
if (!*ie_ptr) {
*ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
GFP_KERNEL);
if (!*ie_ptr)
return -ENOMEM;
ie = *ie_ptr;
}
vs_ie = (struct ieee_types_header *)vendor_ie;
if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
IEEE_MAX_IE_SIZE)
return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
ie->mgmt_subtype_mask = cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
*ie_ptr = ie;
return 0;
}
/* This function parses beacon IEs, probe response IEs, association response IEs
* from cfg80211_ap_settings->beacon and sets these IE to FW.
*/
static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL, *ar_ie = NULL;
u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
int ret = 0;
if (data->beacon_ies && data->beacon_ies_len) {
mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
&beacon_ie, MGMT_MASK_BEACON,
WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
&beacon_ie, MGMT_MASK_BEACON,
WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
}
if (data->proberesp_ies && data->proberesp_ies_len) {
mwifiex_update_vs_ie(data->proberesp_ies,
data->proberesp_ies_len, &pr_ie,
MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->proberesp_ies,
data->proberesp_ies_len, &pr_ie,
MGMT_MASK_PROBE_RESP,
WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
}
if (data->assocresp_ies && data->assocresp_ies_len) {
mwifiex_update_vs_ie(data->assocresp_ies,
data->assocresp_ies_len, &ar_ie,
MGMT_MASK_ASSOC_RESP |
MGMT_MASK_REASSOC_RESP,
WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->assocresp_ies,
data->assocresp_ies_len, &ar_ie,
MGMT_MASK_ASSOC_RESP |
MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
WLAN_OUI_TYPE_WFA_P2P);
}
if (beacon_ie || pr_ie || ar_ie) {
ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
&beacon_idx, pr_ie,
&pr_idx, ar_ie, &ar_idx);
if (ret)
goto done;
}
priv->beacon_idx = beacon_idx;
priv->proberesp_idx = pr_idx;
priv->assocresp_idx = ar_idx;
done:
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
return ret;
}
/* This function parses head and tail IEs, from cfg80211_beacon_data and sets
* these IE to FW.
*/
static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *info)
{
struct mwifiex_ie *gen_ie;
struct ieee_types_header *hdr;
struct ieee80211_vendor_ie *vendorhdr;
u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
int left_len, parsed_len = 0;
unsigned int token_len;
int err = 0;
if (!info->tail || !info->tail_len)
return 0;
gen_ie = kzalloc(sizeof(*gen_ie), GFP_KERNEL);
if (!gen_ie)
return -ENOMEM;
left_len = info->tail_len;
/* Many IEs are generated in FW by parsing bss configuration.
* Let's not add them here; else we may end up duplicating these IEs
*/
while (left_len > sizeof(struct ieee_types_header)) {
hdr = (void *)(info->tail + parsed_len);
token_len = hdr->len + sizeof(struct ieee_types_header);
if (token_len > left_len) {
err = -EINVAL;
goto out;
}
switch (hdr->element_id) {
case WLAN_EID_SSID:
case WLAN_EID_SUPP_RATES:
case WLAN_EID_COUNTRY:
case WLAN_EID_PWR_CONSTRAINT:
case WLAN_EID_ERP_INFO:
case WLAN_EID_EXT_SUPP_RATES:
case WLAN_EID_HT_CAPABILITY:
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
break;
case WLAN_EID_VENDOR_SPECIFIC:
/* Skip only Microsoft WMM IE */
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
(const u8 *)hdr,
token_len))
break;
/* fall through */
default:
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
goto out;
}
memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
ie_len += token_len;
break;
}
left_len -= token_len;
parsed_len += token_len;
}
/* parse only WPA vendor IE from tail, WMM IE is configured by
* bss_config command
*/
vendorhdr = (void *)cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
info->tail, info->tail_len);
if (vendorhdr) {
token_len = vendorhdr->len + sizeof(struct ieee_types_header);
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
goto out;
}
memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
ie_len += token_len;
}
if (!ie_len)
goto out;
gen_ie->ie_index = cpu_to_le16(gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
MGMT_MASK_PROBE_RESP |
MGMT_MASK_ASSOC_RESP);
gen_ie->ie_length = cpu_to_le16(ie_len);
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
NULL, NULL)) {
err = -EINVAL;
goto out;
}
priv->gen_idx = gen_idx;
out:
kfree(gen_ie);
return err;
}
/* This function parses different IEs-head & tail IEs, beacon IEs,
* probe response IEs, association response IEs from cfg80211_ap_settings
* function and sets these IE to FW.
*/
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *info)
{
int ret;
ret = mwifiex_uap_parse_tail_ies(priv, info);
if (ret)
return ret;
return mwifiex_set_mgmt_beacon_data_ies(priv, info);
}
/* This function removes management IE set */
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
struct mwifiex_ie *ar_ie = NULL, *gen_ie = NULL;
int ret = 0;
if (priv->gen_idx != MWIFIEX_AUTO_IDX_MASK) {
gen_ie = kmalloc(sizeof(*gen_ie), GFP_KERNEL);
if (!gen_ie)
return -ENOMEM;
gen_ie->ie_index = cpu_to_le16(priv->gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
gen_ie->ie_length = 0;
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &priv->gen_idx,
NULL, &priv->proberesp_idx,
NULL, &priv->assocresp_idx)) {
ret = -1;
goto done;
}
priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
}
if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!beacon_ie) {
ret = -ENOMEM;
goto done;
}
beacon_ie->ie_index = cpu_to_le16(priv->beacon_idx);
beacon_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
beacon_ie->ie_length = 0;
}
if (priv->proberesp_idx != MWIFIEX_AUTO_IDX_MASK) {
pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!pr_ie) {
ret = -ENOMEM;
goto done;
}
pr_ie->ie_index = cpu_to_le16(priv->proberesp_idx);
pr_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
pr_ie->ie_length = 0;
}
if (priv->assocresp_idx != MWIFIEX_AUTO_IDX_MASK) {
ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!ar_ie) {
ret = -ENOMEM;
goto done;
}
ar_ie->ie_index = cpu_to_le16(priv->assocresp_idx);
ar_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
ar_ie->ie_length = 0;
}
if (beacon_ie || pr_ie || ar_ie)
ret = mwifiex_update_uap_custom_ie(priv,
beacon_ie, &priv->beacon_idx,
pr_ie, &priv->proberesp_idx,
ar_ie, &priv->assocresp_idx);
done:
kfree(gen_ie);
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_998_0 |
crossvul-cpp_data_bad_4697_3 | /*
* irc-server.c - I/O communication with IRC servers
*
* Copyright (C) 2003-2020 Sébastien Helleu <flashcode@flashtux.org>
* Copyright (C) 2005-2010 Emmanuel Bouthenot <kolter@openics.org>
* Copyright (C) 2012 Simon Arlott
*
* This file is part of WeeChat, the extensible chat client.
*
* WeeChat is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* WeeChat is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with WeeChat. If not, see <https://www.gnu.org/licenses/>.
*/
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#ifdef _WIN32
#include <winsock.h>
#else
#include <sys/socket.h>
#include <sys/time.h>
#endif /* _WIN32 */
#include <sys/types.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <arpa/nameser.h>
#include <resolv.h>
#ifdef HAVE_GNUTLS
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>
#endif /* HAVE_GNUTLS */
#include "../weechat-plugin.h"
#include "irc.h"
#include "irc-server.h"
#include "irc-bar-item.h"
#include "irc-buffer.h"
#include "irc-channel.h"
#include "irc-color.h"
#include "irc-command.h"
#include "irc-config.h"
#include "irc-input.h"
#include "irc-message.h"
#include "irc-nick.h"
#include "irc-notify.h"
#include "irc-protocol.h"
#include "irc-raw.h"
#include "irc-redirect.h"
#include "irc-sasl.h"
struct t_irc_server *irc_servers = NULL;
struct t_irc_server *last_irc_server = NULL;
struct t_irc_message *irc_recv_msgq = NULL;
struct t_irc_message *irc_msgq_last_msg = NULL;
char *irc_server_sasl_fail_string[IRC_SERVER_NUM_SASL_FAIL] =
{ "continue", "reconnect", "disconnect" };
char *irc_server_options[IRC_SERVER_NUM_OPTIONS][2] =
{ { "addresses", "" },
{ "proxy", "" },
{ "ipv6", "on" },
{ "ssl", "off" },
{ "ssl_cert", "" },
{ "ssl_password", "" },
{ "ssl_priorities", "NORMAL:-VERS-SSL3.0" },
{ "ssl_dhkey_size", "2048" },
{ "ssl_fingerprint", "" },
{ "ssl_verify", "on" },
{ "password", "" },
{ "capabilities", "" },
{ "sasl_mechanism", "plain" },
{ "sasl_username", "" },
{ "sasl_password", "" },
{ "sasl_key", "", },
{ "sasl_timeout", "15" },
{ "sasl_fail", "continue" },
{ "autoconnect", "off" },
{ "autoreconnect", "on" },
{ "autoreconnect_delay", "10" },
{ "nicks", "" },
{ "nicks_alternate", "on" },
{ "username", "" },
{ "realname", "" },
{ "local_hostname", "" },
{ "usermode", "" },
{ "command", "" },
{ "command_delay", "0" },
{ "autojoin", "" },
{ "autorejoin", "off" },
{ "autorejoin_delay", "30" },
{ "connection_timeout", "60" },
{ "anti_flood_prio_high", "2" },
{ "anti_flood_prio_low", "2" },
{ "away_check", "0" },
{ "away_check_max_nicks", "25" },
{ "msg_kick", "" },
{ "msg_part", "WeeChat ${info:version}" },
{ "msg_quit", "WeeChat ${info:version}" },
{ "notify", "" },
{ "split_msg_max_length", "512" },
{ "charset_message", "message" },
};
char *irc_server_casemapping_string[IRC_SERVER_NUM_CASEMAPPING] =
{ "rfc1459", "strict-rfc1459", "ascii" };
char *irc_server_prefix_modes_default = "ov";
char *irc_server_prefix_chars_default = "@+";
char *irc_server_chanmodes_default = "beI,k,l";
const char *irc_server_send_default_tags = NULL; /* default tags when */
/* sending a message */
#ifdef HAVE_GNUTLS
gnutls_digest_algorithm_t irc_fingerprint_digest_algos[IRC_FINGERPRINT_NUM_ALGOS] =
{ GNUTLS_DIG_SHA1, GNUTLS_DIG_SHA256, GNUTLS_DIG_SHA512 };
char *irc_fingerprint_digest_algos_name[IRC_FINGERPRINT_NUM_ALGOS] =
{ "SHA-1", "SHA-256", "SHA-512" };
int irc_fingerprint_digest_algos_size[IRC_FINGERPRINT_NUM_ALGOS] =
{ 160, 256, 512 };
#endif /* HAVE_GNUTLS */
void irc_server_reconnect (struct t_irc_server *server);
void irc_server_free_data (struct t_irc_server *server);
void irc_server_autojoin_create_buffers (struct t_irc_server *server);
/*
* Checks if a server pointer is valid.
*
* Returns:
* 1: server exists
* 0: server does not exist
*/
int
irc_server_valid (struct t_irc_server *server)
{
struct t_irc_server *ptr_server;
if (!server)
return 0;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (ptr_server == server)
return 1;
}
/* server not found */
return 0;
}
/*
* Searches for a server by name.
*
* Returns pointer to server found, NULL if not found.
*/
struct t_irc_server *
irc_server_search (const char *server_name)
{
struct t_irc_server *ptr_server;
if (!server_name)
return NULL;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (strcmp (ptr_server->name, server_name) == 0)
return ptr_server;
}
/* server not found */
return NULL;
}
/*
* Searches for a server by name (case insensitive).
*
* Returns pointer to server found, NULL if not found.
*/
struct t_irc_server *
irc_server_casesearch (const char *server_name)
{
struct t_irc_server *ptr_server;
if (!server_name)
return NULL;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (weechat_strcasecmp (ptr_server->name, server_name) == 0)
return ptr_server;
}
/* server not found */
return NULL;
}
/*
* Searches for a server option name.
*
* Returns index of option in array "irc_server_option_string", -1 if not found.
*/
int
irc_server_search_option (const char *option_name)
{
int i;
if (!option_name)
return -1;
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
if (weechat_strcasecmp (irc_server_options[i][0], option_name) == 0)
return i;
}
/* server option not found */
return -1;
}
/*
* Searches for a casemapping.
*
* Returns index of casemapping in array "irc_server_casemapping_string", -1 if
* not found.
*/
int
irc_server_search_casemapping (const char *casemapping)
{
int i;
for (i = 0; i < IRC_SERVER_NUM_CASEMAPPING; i++)
{
if (weechat_strcasecmp (irc_server_casemapping_string[i], casemapping) == 0)
return i;
}
/* casemapping not found */
return -1;
}
/*
* Compares two strings on server (case insensitive, depends on casemapping).
*
* Returns:
* < 0: string1 < string2
* 0: string1 == string2
* > 0: string1 > string2
*/
int
irc_server_strcasecmp (struct t_irc_server *server,
const char *string1, const char *string2)
{
int casemapping, rc;
casemapping = (server) ? server->casemapping : IRC_SERVER_CASEMAPPING_RFC1459;
switch (casemapping)
{
case IRC_SERVER_CASEMAPPING_RFC1459:
rc = weechat_strcasecmp_range (string1, string2, 30);
break;
case IRC_SERVER_CASEMAPPING_STRICT_RFC1459:
rc = weechat_strcasecmp_range (string1, string2, 29);
break;
case IRC_SERVER_CASEMAPPING_ASCII:
rc = weechat_strcasecmp (string1, string2);
break;
default:
rc = weechat_strcasecmp_range (string1, string2, 30);
break;
}
return rc;
}
/*
* Compares two strings on server (case insensitive, depends on casemapping) for
* max chars.
*
* Returns:
* < 0: string1 < string2
* 0: string1 == string2
* > 0: string1 > string2
*/
int
irc_server_strncasecmp (struct t_irc_server *server,
const char *string1, const char *string2, int max)
{
int casemapping, rc;
casemapping = (server) ? server->casemapping : IRC_SERVER_CASEMAPPING_RFC1459;
switch (casemapping)
{
case IRC_SERVER_CASEMAPPING_RFC1459:
rc = weechat_strncasecmp_range (string1, string2, max, 30);
break;
case IRC_SERVER_CASEMAPPING_STRICT_RFC1459:
rc = weechat_strncasecmp_range (string1, string2, max, 29);
break;
case IRC_SERVER_CASEMAPPING_ASCII:
rc = weechat_strncasecmp (string1, string2, max);
break;
default:
rc = weechat_strncasecmp_range (string1, string2, max, 30);
break;
}
return rc;
}
/*
* Evaluates a string using the server as context:
* ${irc_server.xxx} and ${server} are replaced by a server option and the
* server name.
*
* Returns the evaluated string.
*
* Note: result must be freed after use.
*/
char *
irc_server_eval_expression (struct t_irc_server *server, const char *string)
{
struct t_hashtable *pointers, *extra_vars;
char *value;
pointers = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_POINTER,
NULL, NULL);
extra_vars = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL, NULL);
if (server)
{
if (pointers)
weechat_hashtable_set (pointers, "irc_server", server);
if (extra_vars)
weechat_hashtable_set (extra_vars, "server", server->name);
}
value = weechat_string_eval_expression (string,
pointers, extra_vars, NULL);
if (pointers)
weechat_hashtable_free (pointers);
if (extra_vars)
weechat_hashtable_free (extra_vars);
return value;
}
/*
* Evaluates and returns the fingerprint.
*
* Returns the evaluated fingerprint, NULL if the fingerprint option is
* invalid.
*
* Note: result must be freed after use.
*/
char *
irc_server_eval_fingerprint (struct t_irc_server *server)
{
#ifdef HAVE_GNUTLS
const char *ptr_fingerprint;
char *fingerprint_eval, **fingerprints, *str_sizes;
int i, j, rc, algo, length;
ptr_fingerprint = IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_FINGERPRINT);
/* empty fingerprint is just ignored (considered OK) */
if (!ptr_fingerprint || !ptr_fingerprint[0])
return strdup ("");
/* evaluate fingerprint */
fingerprint_eval = irc_server_eval_expression (server, ptr_fingerprint);
if (!fingerprint_eval || !fingerprint_eval[0])
{
weechat_printf (
server->buffer,
_("%s%s: the evaluated fingerprint for server \"%s\" must not be "
"empty"),
weechat_prefix ("error"),
IRC_PLUGIN_NAME,
server->name);
if (fingerprint_eval)
free (fingerprint_eval);
return NULL;
}
/* split fingerprint */
fingerprints = weechat_string_split (fingerprint_eval, ",", NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0, NULL);
if (!fingerprints)
return fingerprint_eval;
rc = 0;
for (i = 0; fingerprints[i]; i++)
{
length = strlen (fingerprints[i]);
algo = irc_server_fingerprint_search_algo_with_size (length * 4);
if (algo < 0)
{
rc = -1;
break;
}
for (j = 0; j < length; j++)
{
if (!isxdigit ((unsigned char)fingerprints[i][j]))
{
rc = -2;
break;
}
}
if (rc < 0)
break;
}
weechat_string_free_split (fingerprints);
switch (rc)
{
case -1: /* invalid size */
str_sizes = irc_server_fingerprint_str_sizes ();
weechat_printf (
server->buffer,
_("%s%s: invalid fingerprint size for server \"%s\", the "
"number of hexadecimal digits must be "
"one of: %s"),
weechat_prefix ("error"),
IRC_PLUGIN_NAME,
server->name,
(str_sizes) ? str_sizes : "?");
if (str_sizes)
free (str_sizes);
free (fingerprint_eval);
return NULL;
case -2: /* invalid content */
weechat_printf (
server->buffer,
_("%s%s: invalid fingerprint for server \"%s\", it must "
"contain only hexadecimal digits (0-9, "
"a-f)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, server->name);
free (fingerprint_eval);
return NULL;
}
return fingerprint_eval;
#else
/* make C compiler happy */
(void) server;
return strdup ("");
#endif /* HAVE_GNUTLS */
}
/*
* Checks if SASL is enabled on server.
*
* Returns:
* 1: SASL is enabled
* 0: SASL is disabled
*/
int
irc_server_sasl_enabled (struct t_irc_server *server)
{
int sasl_mechanism, rc;
char *sasl_username, *sasl_password;
const char *sasl_key;
sasl_mechanism = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_SASL_MECHANISM);
sasl_username = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_USERNAME));
sasl_password = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_PASSWORD));
sasl_key = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_KEY);
/*
* SASL is enabled if one of these conditions is true:
* - mechanism is "external"
* - mechanism is "ecdsa-nist256p-challenge" with username/key set
* - another mechanism with username/password set
*/
rc = ((sasl_mechanism == IRC_SASL_MECHANISM_EXTERNAL)
|| ((sasl_mechanism == IRC_SASL_MECHANISM_ECDSA_NIST256P_CHALLENGE)
&& sasl_username && sasl_username[0]
&& sasl_key && sasl_key[0])
|| (sasl_username && sasl_username[0]
&& sasl_password && sasl_password[0])) ? 1 : 0;
if (sasl_username)
free (sasl_username);
if (sasl_password)
free (sasl_password);
return rc;
}
/*
* Gets name of server without port (ends before first '/' if found).
*
* Note: result must be freed after use.
*/
char *
irc_server_get_name_without_port (const char *name)
{
char *pos;
if (!name)
return NULL;
pos = strchr (name, '/');
if (pos && (pos != name))
return weechat_strndup (name, pos - name);
return strdup (name);
}
/*
* Sets addresses for server.
*
* Returns:
* 1: addresses have been set (changed)
* 0: nothing set (addresses unchanged)
*/
int
irc_server_set_addresses (struct t_irc_server *server, const char *addresses)
{
int i;
char *pos, *error, *addresses_eval;
long number;
addresses_eval = NULL;
if (addresses && addresses[0])
{
addresses_eval = irc_server_eval_expression (server, addresses);
if (server->addresses_eval
&& (strcmp (server->addresses_eval, addresses_eval) == 0))
{
free (addresses_eval);
return 0;
}
}
/* free data */
if (server->addresses_eval)
{
free (server->addresses_eval);
server->addresses_eval = NULL;
}
server->addresses_count = 0;
if (server->addresses_array)
{
weechat_string_free_split (server->addresses_array);
server->addresses_array = NULL;
}
if (server->ports_array)
{
free (server->ports_array);
server->ports_array = NULL;
}
if (server->retry_array)
{
free (server->retry_array);
server->retry_array = NULL;
}
/* set new addresses/ports */
server->addresses_eval = addresses_eval;
if (!addresses_eval)
return 1;
server->addresses_array = weechat_string_split (
addresses_eval,
",",
" ",
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0,
&server->addresses_count);
server->ports_array = malloc (
server->addresses_count * sizeof (server->ports_array[0]));
server->retry_array = malloc (
server->addresses_count * sizeof (server->retry_array[0]));
for (i = 0; i < server->addresses_count; i++)
{
pos = strchr (server->addresses_array[i], '/');
if (pos)
{
pos[0] = 0;
pos++;
error = NULL;
number = strtol (pos, &error, 10);
server->ports_array[i] = (error && !error[0]) ?
number : IRC_SERVER_DEFAULT_PORT;
}
else
{
server->ports_array[i] = IRC_SERVER_DEFAULT_PORT;
}
server->retry_array[i] = 0;
}
return 1;
}
/*
* Sets index of current address for server.
*/
void
irc_server_set_index_current_address (struct t_irc_server *server, int index)
{
int addresses_changed;
addresses_changed = irc_server_set_addresses (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_ADDRESSES));
if (addresses_changed)
{
/* if the addresses have changed, reset the index to 0 */
index = 0;
}
if (server->current_address)
{
free (server->current_address);
server->current_address = NULL;
/* copy current retry value before loading next server */
if (!addresses_changed
&& server->index_current_address < server->addresses_count)
{
server->retry_array[server->index_current_address] = server->current_retry;
}
}
server->current_port = 0;
server->current_retry = 0;
if (server->addresses_count > 0)
{
index %= server->addresses_count;
server->index_current_address = index;
server->current_address = strdup (server->addresses_array[index]);
server->current_port = server->ports_array[index];
server->current_retry = server->retry_array[index];
}
}
/*
* Sets nicks for server.
*/
void
irc_server_set_nicks (struct t_irc_server *server, const char *nicks)
{
char *nicks2;
/* free data */
server->nicks_count = 0;
if (server->nicks_array)
{
weechat_string_free_split (server->nicks_array);
server->nicks_array = NULL;
}
/* evaluate value */
nicks2 = irc_server_eval_expression (server, nicks);
/* set new nicks */
server->nicks_array = weechat_string_split (
(nicks2) ? nicks2 : IRC_SERVER_DEFAULT_NICKS,
",",
NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0,
&server->nicks_count);
if (nicks2)
free (nicks2);
}
/*
* Sets nickname for server.
*/
void
irc_server_set_nick (struct t_irc_server *server, const char *nick)
{
struct t_irc_channel *ptr_channel;
/* if nick is the same, just return */
if ((!server->nick && !nick)
|| (server->nick && nick && strcmp (server->nick, nick) == 0))
{
return;
}
/* update the nick in server */
if (server->nick)
free (server->nick);
server->nick = (nick) ? strdup (nick) : NULL;
/* set local variable "nick" for server and all channels/pv */
weechat_buffer_set (server->buffer, "localvar_set_nick", nick);
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
weechat_buffer_set (ptr_channel->buffer, "localvar_set_nick", nick);
}
weechat_bar_item_update ("input_prompt");
weechat_bar_item_update ("irc_nick");
weechat_bar_item_update ("irc_nick_host");
}
/*
* Sets host for server.
*/
void
irc_server_set_host (struct t_irc_server *server, const char *host)
{
struct t_irc_channel *ptr_channel;
/* if host is the same, just return */
if ((!server->host && !host)
|| (server->host && host && strcmp (server->host, host) == 0))
{
return;
}
/* update the nick host in server */
if (server->host)
free (server->host);
server->host = (host) ? strdup (host) : NULL;
/* set local variable "host" for server and all channels/pv */
weechat_buffer_set (server->buffer, "localvar_set_host", host);
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
weechat_buffer_set (ptr_channel->buffer,
"localvar_set_host", host);
}
weechat_bar_item_update ("irc_host");
weechat_bar_item_update ("irc_nick_host");
}
/*
* Gets index of nick in array "nicks_array".
*
* Returns index of nick in array, -1 if nick is not set or not found in
* "nicks_array".
*/
int
irc_server_get_nick_index (struct t_irc_server *server)
{
int i;
if (!server->nick)
return -1;
for (i = 0; i < server->nicks_count; i++)
{
if (strcmp (server->nick, server->nicks_array[i]) == 0)
{
return i;
}
}
/* nick not found */
return -1;
}
/*
* Gets an alternate nick when the nick is already used on server.
*
* First tries all declared nicks, then builds nicks by adding "_", until
* length of 9.
*
* If all nicks are still used, builds 99 alternate nicks by using number at the
* end.
*
* Example: nicks = "abcde,fghi,jkl"
* => nicks tried: abcde
* fghi
* jkl
* abcde_
* abcde__
* abcde___
* abcde____
* abcde___1
* abcde___2
* ...
* abcde__99
*
* Returns NULL if no more alternate nick is available.
*/
const char *
irc_server_get_alternate_nick (struct t_irc_server *server)
{
static char nick[64];
char str_number[64];
int nick_index, length_nick, length_number;
nick[0] = '\0';
/* we are still trying nicks from option "nicks" */
if (server->nick_alternate_number < 0)
{
nick_index = irc_server_get_nick_index (server);
if (nick_index < 0)
nick_index = 0;
else
{
nick_index = (nick_index + 1) % server->nicks_count;
/* stop loop if first nick tried was not in the list of nicks */
if ((nick_index == 0) && (server->nick_first_tried < 0))
server->nick_first_tried = 0;
}
if (nick_index != server->nick_first_tried)
{
snprintf (nick, sizeof (nick),
"%s", server->nicks_array[nick_index]);
return nick;
}
/* now we have tried all nicks in list */
/* if alternate nicks are disabled, just return NULL */
if (!IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_NICKS_ALTERNATE))
return NULL;
/* use main nick and we will add "_" and then number if needed */
server->nick_alternate_number = 0;
snprintf (nick, sizeof (nick), "%s", server->nicks_array[0]);
}
else
snprintf (nick, sizeof (nick), "%s", server->nick);
/* if length is < 9, just add a "_" */
if (strlen (nick) < 9)
{
strcat (nick, "_");
return nick;
}
server->nick_alternate_number++;
/* number is max 99 */
if (server->nick_alternate_number > 99)
return NULL;
/* be sure the nick has 9 chars max */
nick[9] = '\0';
/* generate number */
snprintf (str_number, sizeof (str_number),
"%d", server->nick_alternate_number);
/* copy number in nick */
length_nick = strlen (nick);
length_number = strlen (str_number);
if (length_number > length_nick)
return NULL;
memcpy (nick + length_nick - length_number, str_number, length_number);
/* return alternate nick */
return nick;
}
/*
* Gets value of a feature item in "isupport" (copy of IRC message 005).
*
* Returns value of feature (empty string if feature has no value, NULL if
* feature is not found).
*/
const char *
irc_server_get_isupport_value (struct t_irc_server *server, const char *feature)
{
char feature2[64], *pos_feature, *pos_equal, *pos_space;
int length;
static char value[256];
if (!server || !server->isupport || !feature)
return NULL;
/* search feature with value */
snprintf (feature2, sizeof (feature2), " %s=", feature);
pos_feature = strstr (server->isupport, feature2);
if (pos_feature)
{
/* feature found with value, return value */
pos_feature++;
pos_equal = strchr (pos_feature, '=');
pos_space = strchr (pos_feature, ' ');
if (pos_space)
length = pos_space - pos_equal - 1;
else
length = strlen (pos_equal) + 1;
if (length > (int)sizeof (value) - 1)
length = (int)sizeof (value) - 1;
memcpy (value, pos_equal + 1, length);
value[length] = '\0';
return value;
}
/* search feature without value */
feature2[strlen (feature2) - 1] = ' ';
pos_feature = strstr (server->isupport, feature2);
if (pos_feature)
{
value[0] = '\0';
return value;
}
/* feature not found in isupport */
return NULL;
}
/*
* Sets "prefix_modes" and "prefix_chars" in server using value of PREFIX in IRC
* message 005.
*
* For example, if prefix is "(ohv)@%+":
* prefix_modes is set to "ohv"
* prefix_chars is set to "@%+".
*/
void
irc_server_set_prefix_modes_chars (struct t_irc_server *server,
const char *prefix)
{
char *pos;
int i, length_modes, length_chars;
if (!server || !prefix)
return;
/* free previous values */
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
/* assign new values */
pos = strchr (prefix, ')');
if (pos)
{
server->prefix_modes = weechat_strndup (prefix + 1,
pos - prefix - 1);
if (server->prefix_modes)
{
pos++;
length_modes = strlen (server->prefix_modes);
length_chars = strlen (pos);
server->prefix_chars = malloc (length_modes + 1);
if (server->prefix_chars)
{
for (i = 0; i < length_modes; i++)
{
server->prefix_chars[i] = (i < length_chars) ? pos[i] : ' ';
}
server->prefix_chars[length_modes] = '\0';
}
else
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
}
}
}
/*
* Sets lag in server buffer (local variable), update bar item "lag"
* and send signal "irc_server_lag_changed" for the server.
*/
void
irc_server_set_lag (struct t_irc_server *server)
{
char str_lag[32];
if (server->lag >= weechat_config_integer (irc_config_network_lag_min_show))
{
snprintf (str_lag, sizeof (str_lag),
((server->lag_check_time.tv_sec == 0) || (server->lag < 1000)) ?
"%.3f" : "%.0f",
((float)(server->lag)) / 1000);
weechat_buffer_set (server->buffer, "localvar_set_lag", str_lag);
}
else
{
weechat_buffer_set (server->buffer, "localvar_del_lag", "");
}
weechat_hook_signal_send ("irc_server_lag_changed",
WEECHAT_HOOK_SIGNAL_STRING,
server->name);
weechat_bar_item_update ("lag");
}
/*
* Gets prefix_modes for server (for example: "ohv").
*
* Returns default modes if prefix_modes is not set in server.
*/
const char *
irc_server_get_prefix_modes (struct t_irc_server *server)
{
return (server && server->prefix_modes) ?
server->prefix_modes : irc_server_prefix_modes_default;
}
/*
* Gets prefix_chars for server (for example: "@%+").
*
* Returns default chars if prefix_chars is not set in server.
*/
const char *
irc_server_get_prefix_chars (struct t_irc_server *server)
{
return (server && server->prefix_chars) ?
server->prefix_chars : irc_server_prefix_chars_default;
}
/*
* Gets index of mode in prefix_modes.
*
* The mode is for example 'o' or 'v'.
*
* Returns -1 if mode does not exist in server.
*/
int
irc_server_get_prefix_mode_index (struct t_irc_server *server, char mode)
{
const char *prefix_modes;
char *pos;
if (server)
{
prefix_modes = irc_server_get_prefix_modes (server);
pos = strchr (prefix_modes, mode);
if (pos)
return pos - prefix_modes;
}
return -1;
}
/*
* Gets index of prefix_char in prefix_chars.
*
* The prefix char is for example '@' or '+'.
*
* Returns -1 if prefix_char does not exist in server.
*/
int
irc_server_get_prefix_char_index (struct t_irc_server *server,
char prefix_char)
{
const char *prefix_chars;
char *pos;
if (server)
{
prefix_chars = irc_server_get_prefix_chars (server);
pos = strchr (prefix_chars, prefix_char);
if (pos)
return pos - prefix_chars;
}
return -1;
}
/*
* Gets mode for prefix char.
*
* For example prefix_char '@' can return 'o'.
*
* Returns ' ' (space) if prefix char is not found.
*/
char
irc_server_get_prefix_mode_for_char (struct t_irc_server *server,
char prefix_char)
{
const char *prefix_modes;
int index;
if (server)
{
prefix_modes = irc_server_get_prefix_modes (server);
index = irc_server_get_prefix_char_index (server, prefix_char);
if (index >= 0)
return prefix_modes[index];
}
return ' ';
}
/*
* Gets prefix char for mode.
*
* For example mode 'o' can return '@'.
*
* Returns a space if mode is not found.
*/
char
irc_server_get_prefix_char_for_mode (struct t_irc_server *server, char mode)
{
const char *prefix_chars;
int index;
if (server)
{
prefix_chars = irc_server_get_prefix_chars (server);
index = irc_server_get_prefix_mode_index (server, mode);
if (index >= 0)
return prefix_chars[index];
}
return ' ';
}
/*
* Gets chanmodes for server (for example: "eIb,k,l,imnpstS").
*
* Returns default chanmodes if chanmodes is not set in server.
*/
const char *
irc_server_get_chanmodes (struct t_irc_server *server)
{
return (server && server->chanmodes) ?
server->chanmodes : irc_server_chanmodes_default;
}
/*
* Checks if a prefix char is valid for a status message
* (message sent for example to ops/voiced).
*
* The prefix (for example '@' or '+') must be in STATUSMSG,
* or in "prefix_chars" if STATUSMSG is not defined.
*
* Returns:
* 1: prefix is valid for a status message
* 0: prefix is NOT valid for a status message
*/
int
irc_server_prefix_char_statusmsg (struct t_irc_server *server,
char prefix_char)
{
const char *support_statusmsg;
support_statusmsg = irc_server_get_isupport_value (server, "STATUSMSG");
if (support_statusmsg)
return (strchr (support_statusmsg, prefix_char)) ? 1 : 0;
return (irc_server_get_prefix_char_index (server, prefix_char) >= 0) ?
1 : 0;
}
/*
* Get max modes supported in one command by the server
* (in isupport value, with the format: "MODES=4").
*
* Default is 4 if the info is not given by the server.
*/
int
irc_server_get_max_modes (struct t_irc_server *server)
{
const char *support_modes;
char *error;
long number;
int max_modes;
max_modes = 4;
support_modes = irc_server_get_isupport_value (server, "MODES");
if (support_modes)
{
error = NULL;
number = strtol (support_modes, &error, 10);
if (error && !error[0])
{
max_modes = number;
if (max_modes < 1)
max_modes = 1;
if (max_modes > 128)
max_modes = 128;
}
}
return max_modes;
}
/*
* Gets an evaluated default_msg server option: replaces "%v" by WeeChat
* version if there's no ${...} in string, or just evaluates the string.
*
* Note: result must be freed after use.
*/
char *
irc_server_get_default_msg (const char *default_msg,
struct t_irc_server *server,
const char *channel_name)
{
char *version;
struct t_hashtable *extra_vars;
char *msg, *res;
/*
* "%v" for version is deprecated since WeeChat 1.6, where
* an expression ${info:version} is preferred, so we replace
* the "%v" with version only if there's no "${...}" in string
*/
if (strstr (default_msg, "%v") && !strstr (default_msg, "${"))
{
version = weechat_info_get ("version", "");
res = weechat_string_replace (default_msg, "%v",
(version) ? version : "");
if (version)
free (version);
return res;
}
extra_vars = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL,
NULL);
if (extra_vars)
{
weechat_hashtable_set (extra_vars, "server", server->name);
weechat_hashtable_set (extra_vars, "channel",
(channel_name) ? channel_name : "");
weechat_hashtable_set (extra_vars, "nick", server->nick);
}
msg = weechat_string_eval_expression (default_msg, NULL, extra_vars, NULL);
if (extra_vars)
weechat_hashtable_free (extra_vars);
return msg;
}
/*
* Allocates a new server and adds it to the servers queue.
*
* Returns pointer to new server, NULL if error.
*/
struct t_irc_server *
irc_server_alloc (const char *name)
{
struct t_irc_server *new_server;
int i, length;
char *option_name;
if (irc_server_casesearch (name))
return NULL;
/* alloc memory for new server */
new_server = malloc (sizeof (*new_server));
if (!new_server)
{
weechat_printf (NULL,
_("%s%s: error when allocating new server"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return NULL;
}
/* add new server to queue */
new_server->prev_server = last_irc_server;
new_server->next_server = NULL;
if (last_irc_server)
last_irc_server->next_server = new_server;
else
irc_servers = new_server;
last_irc_server = new_server;
/* set name */
new_server->name = strdup (name);
/* internal vars */
new_server->temp_server = 0;
new_server->reloading_from_config = 0;
new_server->reloaded_from_config = 0;
new_server->addresses_eval = NULL;
new_server->addresses_count = 0;
new_server->addresses_array = NULL;
new_server->ports_array = NULL;
new_server->retry_array = NULL;
new_server->index_current_address = 0;
new_server->current_address = NULL;
new_server->current_ip = NULL;
new_server->current_port = 0;
new_server->current_retry = 0;
new_server->sock = -1;
new_server->hook_connect = NULL;
new_server->hook_fd = NULL;
new_server->hook_timer_connection = NULL;
new_server->hook_timer_sasl = NULL;
new_server->is_connected = 0;
new_server->ssl_connected = 0;
new_server->disconnected = 0;
new_server->unterminated_message = NULL;
new_server->nicks_count = 0;
new_server->nicks_array = NULL;
new_server->nick_first_tried = 0;
new_server->nick_alternate_number = -1;
new_server->nick = NULL;
new_server->nick_modes = NULL;
new_server->host = NULL;
new_server->checking_cap_ls = 0;
new_server->cap_ls = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL,
NULL);
new_server->checking_cap_list = 0;
new_server->cap_list = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL,
NULL);
new_server->isupport = NULL;
new_server->prefix_modes = NULL;
new_server->prefix_chars = NULL;
new_server->nick_max_length = 0;
new_server->user_max_length = 0;
new_server->host_max_length = 0;
new_server->casemapping = IRC_SERVER_CASEMAPPING_RFC1459;
new_server->chantypes = NULL;
new_server->chanmodes = NULL;
new_server->monitor = 0;
new_server->monitor_time = 0;
new_server->reconnect_delay = 0;
new_server->reconnect_start = 0;
new_server->command_time = 0;
new_server->reconnect_join = 0;
new_server->disable_autojoin = 0;
new_server->is_away = 0;
new_server->away_message = NULL;
new_server->away_time = 0;
new_server->lag = 0;
new_server->lag_displayed = -1;
new_server->lag_check_time.tv_sec = 0;
new_server->lag_check_time.tv_usec = 0;
new_server->lag_next_check = time (NULL) +
weechat_config_integer (irc_config_network_lag_check);
new_server->lag_last_refresh = 0;
new_server->cmd_list_regexp = NULL;
new_server->last_user_message = 0;
new_server->last_away_check = 0;
new_server->last_data_purge = 0;
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
new_server->outqueue[i] = NULL;
new_server->last_outqueue[i] = NULL;
}
new_server->redirects = NULL;
new_server->last_redirect = NULL;
new_server->notify_list = NULL;
new_server->last_notify = NULL;
new_server->notify_count = 0;
new_server->join_manual = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_TIME,
NULL, NULL);
new_server->join_channel_key = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL, NULL);
new_server->join_noswitch = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_TIME,
NULL, NULL);
new_server->buffer = NULL;
new_server->buffer_as_string = NULL;
new_server->channels = NULL;
new_server->last_channel = NULL;
/* create options with null value */
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
length = strlen (new_server->name) + 1 +
strlen (irc_server_options[i][0]) +
512 + /* inherited option name (irc.server_default.xxx) */
1;
option_name = malloc (length);
if (option_name)
{
snprintf (option_name, length, "%s.%s << irc.server_default.%s",
new_server->name,
irc_server_options[i][0],
irc_server_options[i][0]);
new_server->options[i] = irc_config_server_new_option (
irc_config_file,
irc_config_section_server,
i,
option_name,
NULL,
NULL,
1,
&irc_config_server_check_value_cb,
irc_server_options[i][0],
NULL,
&irc_config_server_change_cb,
irc_server_options[i][0],
NULL);
irc_config_server_change_cb (irc_server_options[i][0], NULL,
new_server->options[i]);
free (option_name);
}
}
return new_server;
}
/*
* Initializes a server with URL of this form: irc://nick:pass@irc.toto.org:6667
*
* Returns pointer to new server, NULL if error.
*/
struct t_irc_server *
irc_server_alloc_with_url (const char *irc_url)
{
char *irc_url2, *pos_server, *pos_nick, *pos_password;
char *pos_address, *pos_port, *pos_channel, *pos;
char *server_address, *server_nicks, *server_autojoin;
char default_port[16];
int ipv6, ssl, length;
struct t_irc_server *ptr_server;
irc_url2 = strdup (irc_url);
if (!irc_url2)
return NULL;
pos_server = NULL;
pos_nick = NULL;
pos_password = NULL;
pos_address = NULL;
pos_port = NULL;
pos_channel = NULL;
ipv6 = 0;
ssl = 0;
snprintf (default_port, sizeof (default_port),
"%d", IRC_SERVER_DEFAULT_PORT);
pos_server = strstr (irc_url2, "://");
if (!pos_server || !pos_server[3])
{
free (irc_url2);
return NULL;
}
pos_server[0] = '\0';
pos_server += 3;
pos_channel = strstr (pos_server, "/");
if (pos_channel)
{
pos_channel[0] = '\0';
pos_channel++;
while (pos_channel[0] == '/')
{
pos_channel++;
}
}
/* check for SSL / IPv6 */
if (weechat_strcasecmp (irc_url2, "irc6") == 0)
{
ipv6 = 1;
}
else if (weechat_strcasecmp (irc_url2, "ircs") == 0)
{
ssl = 1;
}
else if ((weechat_strcasecmp (irc_url2, "irc6s") == 0)
|| (weechat_strcasecmp (irc_url2, "ircs6") == 0))
{
ipv6 = 1;
ssl = 1;
}
if (ssl)
{
snprintf (default_port, sizeof (default_port),
"%d", IRC_SERVER_DEFAULT_PORT_SSL);
}
/* search for nick, password, address+port */
pos_address = strchr (pos_server, '@');
if (pos_address)
{
pos_address[0] = '\0';
pos_address++;
pos_nick = pos_server;
pos_password = strchr (pos_server, ':');
if (pos_password)
{
pos_password[0] = '\0';
pos_password++;
}
}
else
pos_address = pos_server;
/*
* search for port in address, and skip optional [ ] around address
* (can be used to indicate IPv6 port, after ']')
*/
if (pos_address[0] == '[')
{
pos_address++;
pos = strchr (pos_address, ']');
if (!pos)
{
free (irc_url2);
return NULL;
}
pos[0] = '\0';
pos++;
pos_port = strchr (pos, ':');
if (pos_port)
{
pos_port[0] = '\0';
pos_port++;
}
}
else
{
pos_port = strchr (pos_address, ':');
if (pos_port)
{
pos_port[0] = '\0';
pos_port++;
}
}
ptr_server = irc_server_alloc (pos_address);
if (ptr_server)
{
ptr_server->temp_server = 1;
if (pos_address && pos_address[0])
{
length = strlen (pos_address) + 1 +
((pos_port) ? strlen (pos_port) : 16) + 1;
server_address = malloc (length);
if (server_address)
{
snprintf (server_address, length,
"%s/%s",
pos_address,
(pos_port && pos_port[0]) ? pos_port : default_port);
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_ADDRESSES],
server_address,
1);
free (server_address);
}
}
weechat_config_option_set (ptr_server->options[IRC_SERVER_OPTION_IPV6],
(ipv6) ? "on" : "off",
1);
weechat_config_option_set (ptr_server->options[IRC_SERVER_OPTION_SSL],
(ssl) ? "on" : "off",
1);
if (pos_nick && pos_nick[0])
{
length = ((strlen (pos_nick) + 2) * 5) + 1;
server_nicks = malloc (length);
if (server_nicks)
{
snprintf (server_nicks, length,
"%s,%s1,%s2,%s3,%s4",
pos_nick, pos_nick, pos_nick, pos_nick, pos_nick);
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_NICKS],
server_nicks,
1);
free (server_nicks);
}
}
if (pos_password && pos_password[0])
{
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_PASSWORD],
pos_password,
1);
}
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_AUTOCONNECT],
"on",
1);
/* autojoin */
if (pos_channel && pos_channel[0])
{
if (irc_channel_is_channel (ptr_server, pos_channel))
server_autojoin = strdup (pos_channel);
else
{
server_autojoin = malloc (strlen (pos_channel) + 2);
if (server_autojoin)
{
strcpy (server_autojoin, "#");
strcat (server_autojoin, pos_channel);
}
}
if (server_autojoin)
{
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_AUTOJOIN],
server_autojoin,
1);
free (server_autojoin);
}
}
}
free (irc_url2);
return ptr_server;
}
/*
* Applies command line options to a server.
*
* For example: -ssl -nossl -password=test -proxy=myproxy
*/
void
irc_server_apply_command_line_options (struct t_irc_server *server,
int argc, char **argv)
{
int i, index_option;
char *pos, *option_name, *ptr_value, *value_boolean[2] = { "off", "on" };
for (i = 0; i < argc; i++)
{
if (argv[i][0] == '-')
{
pos = strchr (argv[i], '=');
if (pos)
{
option_name = weechat_strndup (argv[i] + 1, pos - argv[i] - 1);
ptr_value = pos + 1;
}
else
{
option_name = strdup (argv[i] + 1);
ptr_value = value_boolean[1];
}
if (option_name)
{
if (weechat_strcasecmp (option_name, "temp") == 0)
{
/* temporary server, not saved */
server->temp_server = 1;
}
else
{
index_option = irc_server_search_option (option_name);
if (index_option < 0)
{
/* look if option is negative, like "-noxxx" */
if (weechat_strncasecmp (argv[i], "-no", 3) == 0)
{
free (option_name);
option_name = strdup (argv[i] + 3);
index_option = irc_server_search_option (option_name);
ptr_value = value_boolean[0];
}
}
if (index_option >= 0)
{
weechat_config_option_set (server->options[index_option],
ptr_value, 1);
}
}
free (option_name);
}
}
}
}
/*
* Adds a message in out queue.
*/
void
irc_server_outqueue_add (struct t_irc_server *server, int priority,
const char *command, const char *msg1,
const char *msg2, int modified, const char *tags,
struct t_irc_redirect *redirect)
{
struct t_irc_outqueue *new_outqueue;
new_outqueue = malloc (sizeof (*new_outqueue));
if (new_outqueue)
{
new_outqueue->command = (command) ? strdup (command) : strdup ("unknown");
new_outqueue->message_before_mod = (msg1) ? strdup (msg1) : NULL;
new_outqueue->message_after_mod = (msg2) ? strdup (msg2) : NULL;
new_outqueue->modified = modified;
new_outqueue->tags = (tags) ? strdup (tags) : NULL;
new_outqueue->redirect = redirect;
new_outqueue->prev_outqueue = server->last_outqueue[priority];
new_outqueue->next_outqueue = NULL;
if (server->last_outqueue[priority])
server->last_outqueue[priority]->next_outqueue = new_outqueue;
else
server->outqueue[priority] = new_outqueue;
server->last_outqueue[priority] = new_outqueue;
}
}
/*
* Frees a message in out queue.
*/
void
irc_server_outqueue_free (struct t_irc_server *server,
int priority,
struct t_irc_outqueue *outqueue)
{
struct t_irc_outqueue *new_outqueue;
if (!server || !outqueue)
return;
/* remove outqueue message */
if (server->last_outqueue[priority] == outqueue)
server->last_outqueue[priority] = outqueue->prev_outqueue;
if (outqueue->prev_outqueue)
{
(outqueue->prev_outqueue)->next_outqueue = outqueue->next_outqueue;
new_outqueue = server->outqueue[priority];
}
else
new_outqueue = outqueue->next_outqueue;
if (outqueue->next_outqueue)
(outqueue->next_outqueue)->prev_outqueue = outqueue->prev_outqueue;
/* free data */
if (outqueue->command)
free (outqueue->command);
if (outqueue->message_before_mod)
free (outqueue->message_before_mod);
if (outqueue->message_after_mod)
free (outqueue->message_after_mod);
if (outqueue->tags)
free (outqueue->tags);
free (outqueue);
/* set new head */
server->outqueue[priority] = new_outqueue;
}
/*
* Frees all messages in out queue.
*/
void
irc_server_outqueue_free_all (struct t_irc_server *server, int priority)
{
while (server->outqueue[priority])
{
irc_server_outqueue_free (server, priority,
server->outqueue[priority]);
}
}
/*
* Frees server data.
*/
void
irc_server_free_data (struct t_irc_server *server)
{
int i;
if (!server)
return;
/* free linked lists */
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
irc_server_outqueue_free_all (server, i);
}
irc_redirect_free_all (server);
irc_notify_free_all (server);
irc_channel_free_all (server);
/* free hashtables */
weechat_hashtable_free (server->join_manual);
weechat_hashtable_free (server->join_channel_key);
weechat_hashtable_free (server->join_noswitch);
/* free server data */
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
if (server->options[i])
weechat_config_option_free (server->options[i]);
}
if (server->name)
free (server->name);
if (server->addresses_eval)
free (server->addresses_eval);
if (server->addresses_array)
weechat_string_free_split (server->addresses_array);
if (server->ports_array)
free (server->ports_array);
if (server->retry_array)
free (server->retry_array);
if (server->current_address)
free (server->current_address);
if (server->current_ip)
free (server->current_ip);
if (server->hook_connect)
weechat_unhook (server->hook_connect);
if (server->hook_fd)
weechat_unhook (server->hook_fd);
if (server->hook_timer_connection)
weechat_unhook (server->hook_timer_connection);
if (server->hook_timer_sasl)
weechat_unhook (server->hook_timer_sasl);
if (server->unterminated_message)
free (server->unterminated_message);
if (server->nicks_array)
weechat_string_free_split (server->nicks_array);
if (server->nick)
free (server->nick);
if (server->nick_modes)
free (server->nick_modes);
if (server->host)
free (server->host);
if (server->cap_ls)
weechat_hashtable_free (server->cap_ls);
if (server->cap_list)
weechat_hashtable_free (server->cap_list);
if (server->isupport)
free (server->isupport);
if (server->prefix_modes)
free (server->prefix_modes);
if (server->prefix_chars)
free (server->prefix_chars);
if (server->chantypes)
free (server->chantypes);
if (server->chanmodes)
free (server->chanmodes);
if (server->away_message)
free (server->away_message);
if (server->cmd_list_regexp)
{
regfree (server->cmd_list_regexp);
free (server->cmd_list_regexp);
}
if (server->buffer_as_string)
free (server->buffer_as_string);
}
/*
* Frees a server and remove it from list of servers.
*/
void
irc_server_free (struct t_irc_server *server)
{
struct t_irc_server *new_irc_servers;
if (!server)
return;
/*
* close server buffer (and all channels/privates)
* (only if we are not in a /upgrade, because during upgrade we want to
* keep connections and closing server buffer would disconnect from server)
*/
if (server->buffer && !irc_signal_upgrade_received)
weechat_buffer_close (server->buffer);
/* remove server from queue */
if (last_irc_server == server)
last_irc_server = server->prev_server;
if (server->prev_server)
{
(server->prev_server)->next_server = server->next_server;
new_irc_servers = irc_servers;
}
else
new_irc_servers = server->next_server;
if (server->next_server)
(server->next_server)->prev_server = server->prev_server;
irc_server_free_data (server);
free (server);
irc_servers = new_irc_servers;
}
/*
* Frees all servers.
*/
void
irc_server_free_all ()
{
/* for each server in memory, remove it */
while (irc_servers)
{
irc_server_free (irc_servers);
}
}
/*
* Copies a server.
*
* Returns pointer to new server, NULL if error.
*/
struct t_irc_server *
irc_server_copy (struct t_irc_server *server, const char *new_name)
{
struct t_irc_server *new_server;
struct t_infolist *infolist;
char *mask, *pos;
const char *option_name;
int length, index_option;
/* check if another server exists with this name */
if (irc_server_casesearch (new_name))
return NULL;
new_server = irc_server_alloc (new_name);
if (new_server)
{
/* duplicate options */
length = 32 + strlen (server->name) + 1;
mask = malloc (length);
if (!mask)
return 0;
snprintf (mask, length, "irc.server.%s.*", server->name);
infolist = weechat_infolist_get ("option", NULL, mask);
free (mask);
if (infolist)
{
while (weechat_infolist_next (infolist))
{
if (!weechat_infolist_integer (infolist, "value_is_null"))
{
option_name = weechat_infolist_string (infolist,
"option_name");
pos = strrchr (option_name, '.');
if (pos)
{
index_option = irc_server_search_option (pos + 1);
if (index_option >= 0)
{
weechat_config_option_set (
new_server->options[index_option],
weechat_infolist_string (infolist, "value"),
1);
}
}
}
}
weechat_infolist_free (infolist);
}
}
return new_server;
}
/*
* Renames a server (internal name).
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_rename (struct t_irc_server *server, const char *new_name)
{
int length;
char *mask, *pos_option, *new_option_name, charset_modifier[256];
const char *buffer_name, *option_name;
struct t_infolist *infolist;
struct t_config_option *ptr_option;
struct t_irc_channel *ptr_channel;
/* check if another server exists with this name */
if (irc_server_casesearch (new_name))
return 0;
/* rename options */
length = 32 + strlen (server->name) + 1;
mask = malloc (length);
if (!mask)
return 0;
snprintf (mask, length, "irc.server.%s.*", server->name);
infolist = weechat_infolist_get ("option", NULL, mask);
free (mask);
if (infolist)
{
while (weechat_infolist_next (infolist))
{
ptr_option = weechat_config_get (
weechat_infolist_string (infolist, "full_name"));
if (ptr_option)
{
option_name = weechat_infolist_string (infolist, "option_name");
if (option_name)
{
pos_option = strrchr (option_name, '.');
if (pos_option)
{
pos_option++;
length = strlen (new_name) + 1 + strlen (pos_option) + 1;
new_option_name = malloc (length);
if (new_option_name)
{
snprintf (new_option_name, length,
"%s.%s", new_name, pos_option);
weechat_config_option_rename (ptr_option, new_option_name);
free (new_option_name);
}
}
}
}
}
weechat_infolist_free (infolist);
}
/* rename server */
if (server->name)
free (server->name);
server->name = strdup (new_name);
/* change name and local variables on buffers */
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->buffer)
{
buffer_name = irc_buffer_build_name (server->name,
ptr_channel->name);
weechat_buffer_set (ptr_channel->buffer, "name", buffer_name);
weechat_buffer_set (ptr_channel->buffer, "localvar_set_server",
server->name);
}
}
if (server->buffer)
{
buffer_name = irc_buffer_build_name (server->name, NULL);
weechat_buffer_set (server->buffer, "name", buffer_name);
weechat_buffer_set (server->buffer, "short_name", server->name);
weechat_buffer_set (server->buffer, "localvar_set_server",
server->name);
weechat_buffer_set (server->buffer, "localvar_set_channel",
server->name);
snprintf (charset_modifier, sizeof (charset_modifier),
"irc.%s", server->name);
weechat_buffer_set (server->buffer, "localvar_set_charset_modifier",
charset_modifier);
}
return 1;
}
/*
* Reorders list of servers.
*
* Returns the number of servers moved in the list (>= 0).
*/
int
irc_server_reorder (const char **servers, int num_servers)
{
struct t_irc_server *ptr_server, *ptr_server2;
int i, num_moved;
ptr_server = irc_servers;
num_moved = 0;
for (i = 0; ptr_server && (i < num_servers); i++)
{
for (ptr_server2 = ptr_server; ptr_server2;
ptr_server2 = ptr_server2->next_server)
{
if (strcmp (ptr_server2->name, servers[i]) == 0)
break;
}
if (ptr_server2 == ptr_server)
{
ptr_server = ptr_server->next_server;
}
else if (ptr_server2)
{
/* extract server from list */
if (ptr_server2 == irc_servers)
irc_servers = ptr_server2->next_server;
if (ptr_server2 == last_irc_server)
last_irc_server = ptr_server2->prev_server;
if (ptr_server2->prev_server)
(ptr_server2->prev_server)->next_server = ptr_server2->next_server;
if (ptr_server2->next_server)
(ptr_server2->next_server)->prev_server = ptr_server2->prev_server;
/* set pointers in ptr_server2 */
ptr_server2->prev_server = ptr_server->prev_server;
ptr_server2->next_server = ptr_server;
/* insert ptr_server2 before ptr_server */
if (ptr_server->prev_server)
(ptr_server->prev_server)->next_server = ptr_server2;
ptr_server->prev_server = ptr_server2;
/* adjust list of servers if needed */
if (ptr_server == irc_servers)
irc_servers = ptr_server2;
num_moved++;
}
}
return num_moved;
}
/*
* Sends a signal for an IRC message (received or sent).
*/
void
irc_server_send_signal (struct t_irc_server *server, const char *signal,
const char *command, const char *full_message,
const char *tags)
{
int length;
char *str_signal, *full_message_tags;
length = strlen (server->name) + 1 + strlen (signal) + 1 + strlen (command) + 1;
str_signal = malloc (length);
if (str_signal)
{
snprintf (str_signal, length,
"%s,%s_%s", server->name, signal, command);
if (tags)
{
length = strlen (tags) + 1 + strlen (full_message) + 1;
full_message_tags = malloc (length);
if (full_message_tags)
{
snprintf (full_message_tags, length,
"%s;%s", tags, full_message);
(void) weechat_hook_signal_send (str_signal,
WEECHAT_HOOK_SIGNAL_STRING,
(void *)full_message_tags);
free (full_message_tags);
}
}
else
{
(void) weechat_hook_signal_send (str_signal,
WEECHAT_HOOK_SIGNAL_STRING,
(void *)full_message);
}
free (str_signal);
}
}
/*
* Sends data to IRC server.
*
* Returns number of bytes sent, -1 if error.
*/
int
irc_server_send (struct t_irc_server *server, const char *buffer, int size_buf)
{
int rc;
if (!server)
{
weechat_printf (
NULL,
_("%s%s: sending data to server: null pointer (please report "
"problem to developers)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return 0;
}
if (size_buf <= 0)
{
weechat_printf (
server->buffer,
_("%s%s: sending data to server: empty buffer (please report "
"problem to developers)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return 0;
}
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
rc = gnutls_record_send (server->gnutls_sess, buffer, size_buf);
else
#endif /* HAVE_GNUTLS */
rc = send (server->sock, buffer, size_buf, 0);
if (rc < 0)
{
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
{
weechat_printf (
server->buffer,
_("%s%s: sending data to server: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
rc, gnutls_strerror (rc));
}
else
#endif /* HAVE_GNUTLS */
{
weechat_printf (
server->buffer,
_("%s%s: sending data to server: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
errno, strerror (errno));
}
}
return rc;
}
/*
* Sets default tags used when sending message.
*/
void
irc_server_set_send_default_tags (const char *tags)
{
irc_server_send_default_tags = tags;
}
/*
* Gets tags to send by concatenation of tags and irc_server_send_default_tags
* (if set).
*
* Note: result must be freed after use.
*/
char *
irc_server_get_tags_to_send (const char *tags)
{
int length;
char *buf;
if (!tags && !irc_server_send_default_tags)
return NULL;
if (!tags)
return strdup (irc_server_send_default_tags);
if (!irc_server_send_default_tags)
return strdup (tags);
/* concatenate tags and irc_server_send_default_tags */
length = strlen (tags) + 1 + strlen (irc_server_send_default_tags) + 1;
buf = malloc (length);
if (buf)
snprintf (buf, length, "%s,%s", tags, irc_server_send_default_tags);
return buf;
}
/*
* Sends a message from out queue.
*/
void
irc_server_outqueue_send (struct t_irc_server *server)
{
time_t time_now;
char *pos, *tags_to_send;
int priority, anti_flood;
time_now = time (NULL);
/* detect if system clock has been changed (now lower than before) */
if (server->last_user_message > time_now)
server->last_user_message = time_now;
for (priority = 0; priority < IRC_SERVER_NUM_OUTQUEUES_PRIO; priority++)
{
switch (priority)
{
case 0:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH);
break;
default:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW);
break;
}
if (server->outqueue[priority]
&& (time_now >= server->last_user_message + anti_flood))
{
if (server->outqueue[priority]->message_before_mod)
{
pos = strchr (server->outqueue[priority]->message_before_mod,
'\r');
if (pos)
pos[0] = '\0';
irc_raw_print (server, IRC_RAW_FLAG_SEND,
server->outqueue[priority]->message_before_mod);
if (pos)
pos[0] = '\r';
}
if (server->outqueue[priority]->message_after_mod)
{
pos = strchr (server->outqueue[priority]->message_after_mod,
'\r');
if (pos)
pos[0] = '\0';
irc_raw_print (server, IRC_RAW_FLAG_SEND |
((server->outqueue[priority]->modified) ? IRC_RAW_FLAG_MODIFIED : 0),
server->outqueue[priority]->message_after_mod);
if (pos)
pos[0] = '\r';
/* send signal with command that will be sent to server */
irc_server_send_signal (
server, "irc_out",
server->outqueue[priority]->command,
server->outqueue[priority]->message_after_mod,
NULL);
tags_to_send = irc_server_get_tags_to_send (
server->outqueue[priority]->tags);
irc_server_send_signal (
server, "irc_outtags",
server->outqueue[priority]->command,
server->outqueue[priority]->message_after_mod,
(tags_to_send) ? tags_to_send : "");
if (tags_to_send)
free (tags_to_send);
/* send command */
irc_server_send (
server, server->outqueue[priority]->message_after_mod,
strlen (server->outqueue[priority]->message_after_mod));
server->last_user_message = time_now;
/* start redirection if redirect is set */
if (server->outqueue[priority]->redirect)
{
irc_redirect_init_command (
server->outqueue[priority]->redirect,
server->outqueue[priority]->message_after_mod);
}
}
irc_server_outqueue_free (server, priority,
server->outqueue[priority]);
break;
}
}
}
/*
* Sends one message to IRC server.
*
* If flag contains outqueue priority value, then messages are in a queue and
* sent slowly (to be sure there will not be any "excess flood"), value of
* queue_msg is priority:
* 1 = higher priority, for user messages
* 2 = lower priority, for other messages (like auto reply to CTCP queries)
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_send_one_msg (struct t_irc_server *server, int flags,
const char *message, const char *nick,
const char *command, const char *channel,
const char *tags)
{
static char buffer[4096];
const char *ptr_msg, *ptr_chan_nick;
char *new_msg, *pos, *tags_to_send, *msg_encoded;
char str_modifier[128], modifier_data[256];
int rc, queue_msg, add_to_queue, first_message, anti_flood;
int pos_channel, pos_text, pos_encode;
time_t time_now;
struct t_irc_redirect *ptr_redirect;
rc = 1;
/* run modifier "irc_out_xxx" */
snprintf (str_modifier, sizeof (str_modifier),
"irc_out_%s",
(command) ? command : "unknown");
new_msg = weechat_hook_modifier_exec (str_modifier,
server->name,
message);
/* no changes in new message */
if (new_msg && (strcmp (message, new_msg) == 0))
{
free (new_msg);
new_msg = NULL;
}
/* message not dropped? */
if (!new_msg || new_msg[0])
{
first_message = 1;
ptr_msg = (new_msg) ? new_msg : message;
msg_encoded = NULL;
irc_message_parse (server, ptr_msg, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, &pos_channel,
&pos_text);
switch (IRC_SERVER_OPTION_INTEGER(server,
IRC_SERVER_OPTION_CHARSET_MESSAGE))
{
case IRC_SERVER_CHARSET_MESSAGE_MESSAGE:
pos_encode = 0;
break;
case IRC_SERVER_CHARSET_MESSAGE_CHANNEL:
pos_encode = (pos_channel >= 0) ? pos_channel : pos_text;
break;
case IRC_SERVER_CHARSET_MESSAGE_TEXT:
pos_encode = pos_text;
break;
default:
pos_encode = 0;
break;
}
if (pos_encode >= 0)
{
ptr_chan_nick = (channel) ? channel : nick;
if (ptr_chan_nick)
{
snprintf (modifier_data, sizeof (modifier_data),
"%s.%s.%s",
weechat_plugin->name,
server->name,
ptr_chan_nick);
}
else
{
snprintf (modifier_data, sizeof (modifier_data),
"%s.%s",
weechat_plugin->name,
server->name);
}
msg_encoded = irc_message_convert_charset (ptr_msg, pos_encode,
"charset_encode",
modifier_data);
}
if (msg_encoded)
ptr_msg = msg_encoded;
while (rc && ptr_msg && ptr_msg[0])
{
pos = strchr (ptr_msg, '\n');
if (pos)
pos[0] = '\0';
snprintf (buffer, sizeof (buffer), "%s\r\n", ptr_msg);
/* anti-flood: look whether we should queue outgoing message or not */
time_now = time (NULL);
/* detect if system clock has been changed (now lower than before) */
if (server->last_user_message > time_now)
server->last_user_message = time_now;
/* get queue from flags */
queue_msg = 0;
if (flags & IRC_SERVER_SEND_OUTQ_PRIO_HIGH)
queue_msg = 1;
else if (flags & IRC_SERVER_SEND_OUTQ_PRIO_LOW)
queue_msg = 2;
switch (queue_msg - 1)
{
case 0:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH);
break;
default:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW);
break;
}
add_to_queue = 0;
if ((queue_msg > 0)
&& (server->outqueue[queue_msg - 1]
|| ((anti_flood > 0)
&& (time_now - server->last_user_message < anti_flood))))
{
add_to_queue = queue_msg;
}
tags_to_send = irc_server_get_tags_to_send (tags);
ptr_redirect = irc_redirect_search_available (server);
if (add_to_queue > 0)
{
/* queue message (do not send anything now) */
irc_server_outqueue_add (server, add_to_queue - 1, command,
(new_msg && first_message) ? message : NULL,
buffer,
(new_msg) ? 1 : 0,
tags_to_send,
ptr_redirect);
/* mark redirect as "used" */
if (ptr_redirect)
ptr_redirect->assigned_to_command = 1;
}
else
{
if (first_message)
{
irc_raw_print (server, IRC_RAW_FLAG_SEND, message);
}
if (new_msg)
{
irc_raw_print (server,
IRC_RAW_FLAG_SEND | IRC_RAW_FLAG_MODIFIED,
ptr_msg);
}
/* send signal with command that will be sent to server */
irc_server_send_signal (server, "irc_out",
(command) ? command : "unknown",
ptr_msg,
NULL);
irc_server_send_signal (server, "irc_outtags",
(command) ? command : "unknown",
ptr_msg,
(tags_to_send) ? tags_to_send : "");
if (irc_server_send (server, buffer, strlen (buffer)) <= 0)
rc = 0;
else
{
if (queue_msg > 0)
server->last_user_message = time_now;
}
if (ptr_redirect)
irc_redirect_init_command (ptr_redirect, buffer);
}
if (tags_to_send)
free (tags_to_send);
if (pos)
{
pos[0] = '\n';
ptr_msg = pos + 1;
}
else
ptr_msg = NULL;
first_message = 0;
}
if (msg_encoded)
free (msg_encoded);
}
else
{
irc_raw_print (server, IRC_RAW_FLAG_SEND | IRC_RAW_FLAG_MODIFIED,
_("(message dropped)"));
}
if (new_msg)
free (new_msg);
return rc;
}
/*
* Sends formatted data to IRC server.
*
* Many messages may be sent, separated by '\n'.
*
* If flags contains "IRC_SERVER_SEND_RETURN_HASHTABLE", then a hashtable with
* split of message is returned (see function irc_message_split() in
* irc-message.c)
*
* Note: hashtable must be freed after use.
*/
struct t_hashtable *
irc_server_sendf (struct t_irc_server *server, int flags, const char *tags,
const char *format, ...)
{
char **items, hash_key[32], value[32], *nick, *command, *channel, *new_msg;
char str_modifier[128];
const char *str_message, *str_args;
int i, items_count, number, ret_number, rc;
struct t_hashtable *hashtable, *ret_hashtable;
if (!server)
return NULL;
weechat_va_format (format);
if (!vbuffer)
return NULL;
ret_hashtable = NULL;
ret_number = 1;
if (flags & IRC_SERVER_SEND_RETURN_HASHTABLE)
{
ret_hashtable = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL, NULL);
}
rc = 1;
items = weechat_string_split (vbuffer, "\n", NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0, &items_count);
for (i = 0; i < items_count; i++)
{
/* run modifier "irc_out1_xxx" (like "irc_out_xxx", but before split) */
irc_message_parse (server, items[i], NULL, NULL,
&nick, NULL, NULL, &command, &channel, NULL, NULL,
NULL, NULL, NULL, NULL);
snprintf (str_modifier, sizeof (str_modifier),
"irc_out1_%s",
(command) ? command : "unknown");
new_msg = weechat_hook_modifier_exec (str_modifier,
server->name,
items[i]);
/* no changes in new message */
if (new_msg && (strcmp (items[i], new_msg) == 0))
{
free (new_msg);
new_msg = NULL;
}
/* message not dropped? */
if (!new_msg || new_msg[0])
{
/* send signal with command that will be sent to server (before split) */
irc_server_send_signal (server, "irc_out1",
(command) ? command : "unknown",
(new_msg) ? new_msg : items[i],
NULL);
/*
* split message if needed (max is 512 bytes by default,
* including the final "\r\n")
*/
hashtable = irc_message_split (server,
(new_msg) ? new_msg : items[i]);
if (hashtable)
{
number = 1;
while (1)
{
snprintf (hash_key, sizeof (hash_key), "msg%d", number);
str_message = weechat_hashtable_get (hashtable, hash_key);
if (!str_message)
break;
snprintf (hash_key, sizeof (hash_key), "args%d", number);
str_args = weechat_hashtable_get (hashtable, hash_key);
rc = irc_server_send_one_msg (server, flags, str_message,
nick, command, channel, tags);
if (!rc)
break;
if (ret_hashtable)
{
snprintf (hash_key, sizeof (hash_key),
"msg%d", ret_number);
weechat_hashtable_set (ret_hashtable,
hash_key, str_message);
if (str_args)
{
snprintf (hash_key, sizeof (hash_key),
"args%d", ret_number);
weechat_hashtable_set (ret_hashtable,
hash_key, str_args);
}
ret_number++;
}
number++;
}
if (ret_hashtable)
{
snprintf (value, sizeof (value), "%d", ret_number - 1);
weechat_hashtable_set (ret_hashtable, "count", value);
}
weechat_hashtable_free (hashtable);
if (!rc)
break;
}
}
if (nick)
free (nick);
if (command)
free (command);
if (channel)
free (channel);
if (new_msg)
free (new_msg);
}
if (items)
weechat_string_free_split (items);
free (vbuffer);
return ret_hashtable;
}
/*
* Adds a message to received messages queue (at the end).
*/
void
irc_server_msgq_add_msg (struct t_irc_server *server, const char *msg)
{
struct t_irc_message *message;
if (!server->unterminated_message && !msg[0])
return;
message = malloc (sizeof (*message));
if (!message)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return;
}
message->server = server;
if (server->unterminated_message)
{
message->data = malloc (strlen (server->unterminated_message) +
strlen (msg) + 1);
if (!message->data)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
}
else
{
strcpy (message->data, server->unterminated_message);
strcat (message->data, msg);
}
free (server->unterminated_message);
server->unterminated_message = NULL;
}
else
message->data = strdup (msg);
message->next_message = NULL;
if (irc_msgq_last_msg)
{
irc_msgq_last_msg->next_message = message;
irc_msgq_last_msg = message;
}
else
{
irc_recv_msgq = message;
irc_msgq_last_msg = message;
}
}
/*
* Adds an unterminated message to queue.
*/
void
irc_server_msgq_add_unterminated (struct t_irc_server *server,
const char *string)
{
char *unterminated_message2;
if (!string[0])
return;
if (server->unterminated_message)
{
unterminated_message2 =
realloc (server->unterminated_message,
(strlen (server->unterminated_message) +
strlen (string) + 1));
if (!unterminated_message2)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
free (server->unterminated_message);
server->unterminated_message = NULL;
return;
}
server->unterminated_message = unterminated_message2;
strcat (server->unterminated_message, string);
}
else
{
server->unterminated_message = strdup (string);
if (!server->unterminated_message)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
}
}
}
/*
* Splits received buffer, creating queued messages.
*/
void
irc_server_msgq_add_buffer (struct t_irc_server *server, const char *buffer)
{
char *pos_cr, *pos_lf;
while (buffer[0])
{
pos_cr = strchr (buffer, '\r');
pos_lf = strchr (buffer, '\n');
if (!pos_cr && !pos_lf)
{
/* no CR/LF found => add to unterminated and return */
irc_server_msgq_add_unterminated (server, buffer);
return;
}
if (pos_cr && ((!pos_lf) || (pos_lf > pos_cr)))
{
/* found '\r' first => ignore this char */
pos_cr[0] = '\0';
irc_server_msgq_add_unterminated (server, buffer);
buffer = pos_cr + 1;
}
else
{
/* found: '\n' first => terminate message */
pos_lf[0] = '\0';
irc_server_msgq_add_msg (server, buffer);
buffer = pos_lf + 1;
}
}
}
/*
* Flushes message queue.
*/
void
irc_server_msgq_flush ()
{
struct t_irc_message *next;
char *ptr_data, *new_msg, *new_msg2, *ptr_msg, *ptr_msg2, *pos;
char *nick, *host, *command, *channel, *arguments;
char *msg_decoded, *msg_decoded_without_color;
char str_modifier[128], modifier_data[256];
int pos_channel, pos_text, pos_decode;
while (irc_recv_msgq)
{
if (irc_recv_msgq->data)
{
/* read message only if connection was not lost */
if (irc_recv_msgq->server->sock != -1)
{
ptr_data = irc_recv_msgq->data;
while (ptr_data[0] == ' ')
{
ptr_data++;
}
if (ptr_data[0])
{
irc_raw_print (irc_recv_msgq->server, IRC_RAW_FLAG_RECV,
ptr_data);
irc_message_parse (irc_recv_msgq->server,
ptr_data, NULL, NULL, NULL, NULL, NULL,
&command, NULL, NULL, NULL, NULL, NULL,
NULL, NULL);
snprintf (str_modifier, sizeof (str_modifier),
"irc_in_%s",
(command) ? command : "unknown");
new_msg = weechat_hook_modifier_exec (
str_modifier,
irc_recv_msgq->server->name,
ptr_data);
if (command)
free (command);
/* no changes in new message */
if (new_msg && (strcmp (ptr_data, new_msg) == 0))
{
free (new_msg);
new_msg = NULL;
}
/* message not dropped? */
if (!new_msg || new_msg[0])
{
/* use new message (returned by plugin) */
ptr_msg = (new_msg) ? new_msg : ptr_data;
while (ptr_msg && ptr_msg[0])
{
pos = strchr (ptr_msg, '\n');
if (pos)
pos[0] = '\0';
if (new_msg)
{
irc_raw_print (
irc_recv_msgq->server,
IRC_RAW_FLAG_RECV | IRC_RAW_FLAG_MODIFIED,
ptr_msg);
}
irc_message_parse (irc_recv_msgq->server, ptr_msg,
NULL, NULL, &nick, NULL, &host,
&command, &channel, &arguments,
NULL, NULL, NULL,
&pos_channel, &pos_text);
msg_decoded = NULL;
switch (IRC_SERVER_OPTION_INTEGER(irc_recv_msgq->server,
IRC_SERVER_OPTION_CHARSET_MESSAGE))
{
case IRC_SERVER_CHARSET_MESSAGE_MESSAGE:
pos_decode = 0;
break;
case IRC_SERVER_CHARSET_MESSAGE_CHANNEL:
pos_decode = (pos_channel >= 0) ? pos_channel : pos_text;
break;
case IRC_SERVER_CHARSET_MESSAGE_TEXT:
pos_decode = pos_text;
break;
default:
pos_decode = 0;
break;
}
if (pos_decode >= 0)
{
/* convert charset for message */
if (channel
&& irc_channel_is_channel (irc_recv_msgq->server,
channel))
{
snprintf (modifier_data, sizeof (modifier_data),
"%s.%s.%s",
weechat_plugin->name,
irc_recv_msgq->server->name,
channel);
}
else
{
if (nick && (!host || (strcmp (nick, host) != 0)))
{
snprintf (modifier_data,
sizeof (modifier_data),
"%s.%s.%s",
weechat_plugin->name,
irc_recv_msgq->server->name,
nick);
}
else
{
snprintf (modifier_data,
sizeof (modifier_data),
"%s.%s",
weechat_plugin->name,
irc_recv_msgq->server->name);
}
}
msg_decoded = irc_message_convert_charset (
ptr_msg, pos_decode,
"charset_decode", modifier_data);
}
/* replace WeeChat internal color codes by "?" */
msg_decoded_without_color =
weechat_string_remove_color (
(msg_decoded) ? msg_decoded : ptr_msg,
"?");
/* call modifier after charset */
ptr_msg2 = (msg_decoded_without_color) ?
msg_decoded_without_color : ((msg_decoded) ? msg_decoded : ptr_msg);
snprintf (str_modifier, sizeof (str_modifier),
"irc_in2_%s",
(command) ? command : "unknown");
new_msg2 = weechat_hook_modifier_exec (
str_modifier,
irc_recv_msgq->server->name,
ptr_msg2);
if (new_msg2 && (strcmp (ptr_msg2, new_msg2) == 0))
{
free (new_msg2);
new_msg2 = NULL;
}
/* message not dropped? */
if (!new_msg2 || new_msg2[0])
{
/* use new message (returned by plugin) */
if (new_msg2)
ptr_msg2 = new_msg2;
/* parse and execute command */
if (irc_redirect_message (irc_recv_msgq->server,
ptr_msg2, command,
arguments))
{
/* message redirected, we'll not display it! */
}
else
{
/* message not redirected, display it */
irc_protocol_recv_command (
irc_recv_msgq->server,
ptr_msg2,
command,
channel);
}
}
if (new_msg2)
free (new_msg2);
if (nick)
free (nick);
if (host)
free (host);
if (command)
free (command);
if (channel)
free (channel);
if (arguments)
free (arguments);
if (msg_decoded)
free (msg_decoded);
if (msg_decoded_without_color)
free (msg_decoded_without_color);
if (pos)
{
pos[0] = '\n';
ptr_msg = pos + 1;
}
else
ptr_msg = NULL;
}
}
else
{
irc_raw_print (irc_recv_msgq->server,
IRC_RAW_FLAG_RECV | IRC_RAW_FLAG_MODIFIED,
_("(message dropped)"));
}
if (new_msg)
free (new_msg);
}
}
free (irc_recv_msgq->data);
}
next = irc_recv_msgq->next_message;
free (irc_recv_msgq);
irc_recv_msgq = next;
if (!irc_recv_msgq)
irc_msgq_last_msg = NULL;
}
}
/*
* Receives data from a server.
*/
int
irc_server_recv_cb (const void *pointer, void *data, int fd)
{
struct t_irc_server *server;
static char buffer[4096 + 2];
int num_read, msgq_flush, end_recv;
/* make C compiler happy */
(void) data;
(void) fd;
server = (struct t_irc_server *)pointer;
if (!server)
return WEECHAT_RC_ERROR;
msgq_flush = 0;
end_recv = 0;
while (!end_recv)
{
end_recv = 1;
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
num_read = gnutls_record_recv (server->gnutls_sess, buffer,
sizeof (buffer) - 2);
else
#endif /* HAVE_GNUTLS */
num_read = recv (server->sock, buffer, sizeof (buffer) - 2, 0);
if (num_read > 0)
{
buffer[num_read] = '\0';
irc_server_msgq_add_buffer (server, buffer);
msgq_flush = 1; /* the flush will be done after the loop */
#ifdef HAVE_GNUTLS
if (server->ssl_connected
&& (gnutls_record_check_pending (server->gnutls_sess) > 0))
{
/*
* if there are unread data in the gnutls buffers,
* go on with recv
*/
end_recv = 0;
}
#endif /* HAVE_GNUTLS */
}
else
{
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
{
if ((num_read == 0)
|| ((num_read != GNUTLS_E_AGAIN)
&& (num_read != GNUTLS_E_INTERRUPTED)))
{
weechat_printf (
server->buffer,
_("%s%s: reading data on socket: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
num_read,
(num_read == 0) ? _("(connection closed by peer)") :
gnutls_strerror (num_read));
weechat_printf (
server->buffer,
_("%s%s: disconnecting from server..."),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
irc_server_disconnect (server, !server->is_connected, 1);
}
}
else
#endif /* HAVE_GNUTLS */
{
if ((num_read == 0)
|| ((errno != EAGAIN) && (errno != EWOULDBLOCK)))
{
weechat_printf (
server->buffer,
_("%s%s: reading data on socket: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
errno,
(num_read == 0) ? _("(connection closed by peer)") :
strerror (errno));
weechat_printf (
server->buffer,
_("%s%s: disconnecting from server..."),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
irc_server_disconnect (server, !server->is_connected, 1);
}
}
}
}
if (msgq_flush)
irc_server_msgq_flush ();
return WEECHAT_RC_OK;
}
/*
* Callback for server connection: it is called if WeeChat is TCP-connected to
* server, but did not receive message 001.
*/
int
irc_server_timer_connection_cb (const void *pointer, void *data,
int remaining_calls)
{
struct t_irc_server *server;
/* make C compiler happy */
(void) data;
(void) remaining_calls;
server = (struct t_irc_server *)pointer;
if (!server)
return WEECHAT_RC_ERROR;
server->hook_timer_connection = NULL;
if (!server->is_connected)
{
weechat_printf (
server->buffer,
_("%s%s: connection timeout (message 001 not received)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
irc_server_disconnect (server, !server->is_connected, 1);
}
return WEECHAT_RC_OK;
}
/*
* Callback for SASL authentication timer: it is called if there is a timeout
* with SASL authentication (if SASL authentication is OK or failed, then hook
* timer is removed before this callback is called).
*/
int
irc_server_timer_sasl_cb (const void *pointer, void *data, int remaining_calls)
{
struct t_irc_server *server;
int sasl_fail;
/* make C compiler happy */
(void) data;
(void) remaining_calls;
server = (struct t_irc_server *)pointer;
if (!server)
return WEECHAT_RC_ERROR;
server->hook_timer_sasl = NULL;
if (!server->is_connected)
{
weechat_printf (server->buffer,
_("%s%s: SASL authentication timeout"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
sasl_fail = IRC_SERVER_OPTION_INTEGER(server,
IRC_SERVER_OPTION_SASL_FAIL);
if ((sasl_fail == IRC_SERVER_SASL_FAIL_RECONNECT)
|| (sasl_fail == IRC_SERVER_SASL_FAIL_DISCONNECT))
{
irc_server_disconnect (
server, 0,
(sasl_fail == IRC_SERVER_SASL_FAIL_RECONNECT) ? 1 : 0);
}
else
irc_server_sendf (server, 0, NULL, "CAP END");
}
return WEECHAT_RC_OK;
}
/*
* Callback called for each manual join of a server: deletes old channels in the
* hashtable.
*/
void
irc_server_check_join_manual_cb (void *data,
struct t_hashtable *hashtable,
const void *key, const void *value)
{
/* make C compiler happy */
(void) data;
if (*((time_t *)value) + (60 * 10) < time (NULL))
weechat_hashtable_remove (hashtable, key);
}
/*
* Callback called for each join without switch of a server: deletes old channel
* in the hashtable.
*/
void
irc_server_check_join_noswitch_cb (void *data,
struct t_hashtable *hashtable,
const void *key, const void *value)
{
/* make C compiler happy */
(void) data;
if (*((time_t *)value) + (60 * 10) < time (NULL))
weechat_hashtable_remove (hashtable, key);
}
/*
* Callback called for each smart filtered join of a channel: deletes old
* entries in the hashtable.
*/
void
irc_server_check_join_smart_filtered_cb (void *data,
struct t_hashtable *hashtable,
const void *key, const void *value)
{
int unmask_delay;
/* make C compiler happy */
(void) data;
unmask_delay = weechat_config_integer (irc_config_look_smart_filter_join_unmask);
if ((unmask_delay == 0)
|| (*((time_t *)value) < time (NULL) - (unmask_delay * 60)))
{
weechat_hashtable_remove (hashtable, key);
}
}
/*
* Timer called each second to perform some operations on servers.
*/
int
irc_server_timer_cb (const void *pointer, void *data, int remaining_calls)
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
struct t_irc_redirect *ptr_redirect, *ptr_next_redirect;
time_t current_time;
static struct timeval tv;
int away_check, refresh_lag;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) remaining_calls;
current_time = time (NULL);
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
/* check if reconnection is pending */
if ((!ptr_server->is_connected)
&& (ptr_server->reconnect_start > 0)
&& (current_time >= (ptr_server->reconnect_start + ptr_server->reconnect_delay)))
{
irc_server_reconnect (ptr_server);
}
else
{
if (!ptr_server->is_connected)
continue;
/* send queued messages */
irc_server_outqueue_send (ptr_server);
/* check for lag */
if ((weechat_config_integer (irc_config_network_lag_check) > 0)
&& (ptr_server->lag_check_time.tv_sec == 0)
&& (current_time >= ptr_server->lag_next_check))
{
irc_server_sendf (ptr_server, 0, NULL, "PING %s",
(ptr_server->current_address) ?
ptr_server->current_address : "weechat");
gettimeofday (&(ptr_server->lag_check_time), NULL);
ptr_server->lag = 0;
ptr_server->lag_last_refresh = 0;
}
else
{
/* check away (only if lag check was not done) */
away_check = IRC_SERVER_OPTION_INTEGER(
ptr_server, IRC_SERVER_OPTION_AWAY_CHECK);
if (!weechat_hashtable_has_key (ptr_server->cap_list,
"away-notify")
&& (away_check > 0)
&& ((ptr_server->last_away_check == 0)
|| (current_time >= ptr_server->last_away_check + (away_check * 60))))
{
irc_server_check_away (ptr_server);
}
}
/* check if it's time to autojoin channels (after command delay) */
if ((ptr_server->command_time != 0)
&& (current_time >= ptr_server->command_time +
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_COMMAND_DELAY)))
{
irc_server_autojoin_channels (ptr_server);
ptr_server->command_time = 0;
}
/* check if it's time to send MONITOR command */
if ((ptr_server->monitor_time != 0)
&& (current_time >= ptr_server->monitor_time))
{
if (ptr_server->monitor > 0)
irc_notify_send_monitor (ptr_server);
ptr_server->monitor_time = 0;
}
/* compute lag */
if (ptr_server->lag_check_time.tv_sec != 0)
{
refresh_lag = 0;
gettimeofday (&tv, NULL);
ptr_server->lag = (int)(weechat_util_timeval_diff (&(ptr_server->lag_check_time),
&tv) / 1000);
/* refresh lag item if needed */
if (((ptr_server->lag_last_refresh == 0)
|| (current_time >= ptr_server->lag_last_refresh + weechat_config_integer (irc_config_network_lag_refresh_interval)))
&& (ptr_server->lag >= weechat_config_integer (irc_config_network_lag_min_show)))
{
ptr_server->lag_last_refresh = current_time;
if (ptr_server->lag != ptr_server->lag_displayed)
{
ptr_server->lag_displayed = ptr_server->lag;
refresh_lag = 1;
}
}
/* lag timeout? => disconnect */
if ((weechat_config_integer (irc_config_network_lag_reconnect) > 0)
&& (ptr_server->lag >= weechat_config_integer (irc_config_network_lag_reconnect) * 1000))
{
weechat_printf (
ptr_server->buffer,
_("%s%s: lag is high, reconnecting to server %s%s%s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
IRC_COLOR_CHAT_SERVER,
ptr_server->name,
IRC_COLOR_RESET);
irc_server_disconnect (ptr_server, 0, 1);
}
else
{
/* stop lag counting if max lag is reached */
if ((weechat_config_integer (irc_config_network_lag_max) > 0)
&& (ptr_server->lag >= (weechat_config_integer (irc_config_network_lag_max) * 1000)))
{
/* refresh lag item */
ptr_server->lag_last_refresh = current_time;
if (ptr_server->lag != ptr_server->lag_displayed)
{
ptr_server->lag_displayed = ptr_server->lag;
refresh_lag = 1;
}
/* schedule next lag check in 5 seconds */
ptr_server->lag_check_time.tv_sec = 0;
ptr_server->lag_check_time.tv_usec = 0;
ptr_server->lag_next_check = time (NULL) +
weechat_config_integer (irc_config_network_lag_check);
}
}
if (refresh_lag)
irc_server_set_lag (ptr_server);
}
/* remove redirects if timeout occurs */
ptr_redirect = ptr_server->redirects;
while (ptr_redirect)
{
ptr_next_redirect = ptr_redirect->next_redirect;
if ((ptr_redirect->start_time > 0)
&& (ptr_redirect->start_time + ptr_redirect->timeout < current_time))
{
irc_redirect_stop (ptr_redirect, "timeout");
}
ptr_redirect = ptr_next_redirect;
}
/* purge some data (every 10 minutes) */
if (current_time > ptr_server->last_data_purge + (60 * 10))
{
weechat_hashtable_map (ptr_server->join_manual,
&irc_server_check_join_manual_cb,
NULL);
weechat_hashtable_map (ptr_server->join_noswitch,
&irc_server_check_join_noswitch_cb,
NULL);
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->join_smart_filtered)
{
weechat_hashtable_map (ptr_channel->join_smart_filtered,
&irc_server_check_join_smart_filtered_cb,
NULL);
}
}
ptr_server->last_data_purge = current_time;
}
}
}
return WEECHAT_RC_OK;
}
/*
* Closes server connection.
*/
void
irc_server_close_connection (struct t_irc_server *server)
{
int i;
if (server->hook_timer_connection)
{
weechat_unhook (server->hook_timer_connection);
server->hook_timer_connection = NULL;
}
if (server->hook_timer_sasl)
{
weechat_unhook (server->hook_timer_sasl);
server->hook_timer_sasl = NULL;
}
if (server->hook_fd)
{
weechat_unhook (server->hook_fd);
server->hook_fd = NULL;
}
if (server->hook_connect)
{
weechat_unhook (server->hook_connect);
server->hook_connect = NULL;
}
else
{
#ifdef HAVE_GNUTLS
/* close SSL connection */
if (server->ssl_connected)
{
if (server->sock != -1)
gnutls_bye (server->gnutls_sess, GNUTLS_SHUT_WR);
gnutls_deinit (server->gnutls_sess);
}
#endif /* HAVE_GNUTLS */
}
if (server->sock != -1)
{
#ifdef _WIN32
closesocket (server->sock);
#else
close (server->sock);
#endif /* _WIN32 */
server->sock = -1;
}
/* free any pending message */
if (server->unterminated_message)
{
free (server->unterminated_message);
server->unterminated_message = NULL;
}
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
irc_server_outqueue_free_all (server, i);
}
/* remove all redirects */
irc_redirect_free_all (server);
/* remove all manual joins */
weechat_hashtable_remove_all (server->join_manual);
/* remove all keys for pending joins */
weechat_hashtable_remove_all (server->join_channel_key);
/* remove all keys for joins without switch */
weechat_hashtable_remove_all (server->join_noswitch);
/* server is now disconnected */
server->is_connected = 0;
server->ssl_connected = 0;
}
/*
* Schedules reconnection on server.
*/
void
irc_server_reconnect_schedule (struct t_irc_server *server)
{
int minutes, seconds;
if (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTORECONNECT))
{
/* growing reconnect delay */
if (server->reconnect_delay == 0)
server->reconnect_delay = IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AUTORECONNECT_DELAY);
else
server->reconnect_delay = server->reconnect_delay * weechat_config_integer (irc_config_network_autoreconnect_delay_growing);
if ((weechat_config_integer (irc_config_network_autoreconnect_delay_max) > 0)
&& (server->reconnect_delay > weechat_config_integer (irc_config_network_autoreconnect_delay_max)))
server->reconnect_delay = weechat_config_integer (irc_config_network_autoreconnect_delay_max);
server->reconnect_start = time (NULL);
minutes = server->reconnect_delay / 60;
seconds = server->reconnect_delay % 60;
if ((minutes > 0) && (seconds > 0))
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server in %d %s, %d %s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
minutes,
NG_("minute", "minutes", minutes),
seconds,
NG_("second", "seconds", seconds));
}
else if (minutes > 0)
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server in %d %s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
minutes,
NG_("minute", "minutes", minutes));
}
else
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server in %d %s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
seconds,
NG_("second", "seconds", seconds));
}
}
else
{
server->reconnect_delay = 0;
server->reconnect_start = 0;
}
}
/*
* Logins to server.
*/
void
irc_server_login (struct t_irc_server *server)
{
const char *capabilities;
char *password, *username, *realname, *username2;
password = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PASSWORD));
username = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERNAME));
realname = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_REALNAME));
capabilities = IRC_SERVER_OPTION_STRING(
server, IRC_SERVER_OPTION_CAPABILITIES);
if (password && password[0])
{
irc_server_sendf (
server, 0, NULL,
"PASS %s%s",
((password[0] == ':') || (strchr (password, ' '))) ? ":" : "",
password);
}
if (!server->nick)
{
irc_server_set_nick (server,
(server->nicks_array) ?
server->nicks_array[0] : "weechat");
server->nick_first_tried = 0;
}
else
server->nick_first_tried = irc_server_get_nick_index (server);
server->nick_alternate_number = -1;
if (irc_server_sasl_enabled (server) || (capabilities && capabilities[0]))
{
irc_server_sendf (server, 0, NULL, "CAP LS " IRC_SERVER_VERSION_CAP);
}
username2 = (username && username[0]) ?
weechat_string_replace (username, " ", "_") : strdup ("weechat");
irc_server_sendf (
server, 0, NULL,
"NICK %s%s\n"
"USER %s 0 * :%s",
(server->nick && strchr (server->nick, ':')) ? ":" : "",
server->nick,
(username2) ? username2 : "weechat",
(realname && realname[0]) ? realname : ((username2) ? username2 : "weechat"));
if (username2)
free (username2);
if (server->hook_timer_connection)
weechat_unhook (server->hook_timer_connection);
server->hook_timer_connection = weechat_hook_timer (
IRC_SERVER_OPTION_INTEGER (server, IRC_SERVER_OPTION_CONNECTION_TIMEOUT) * 1000,
0, 1,
&irc_server_timer_connection_cb,
server, NULL);
if (password)
free (password);
if (username)
free (username);
if (realname)
free (realname);
}
/*
* Switches address and tries another (called if connection failed with an
* address/port).
*/
void
irc_server_switch_address (struct t_irc_server *server, int connection)
{
if (server->addresses_count > 1)
{
irc_server_set_index_current_address (
server,
(server->index_current_address + 1) % server->addresses_count);
weechat_printf (
server->buffer,
_("%s%s: switching address to %s/%d"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port);
if (connection)
{
if (server->index_current_address == 0)
irc_server_reconnect_schedule (server);
else
irc_server_connect (server);
}
}
else
{
if (connection)
irc_server_reconnect_schedule (server);
}
}
/*
* Reads connection status.
*/
int
irc_server_connect_cb (const void *pointer, void *data,
int status, int gnutls_rc, int sock,
const char *error, const char *ip_address)
{
struct t_irc_server *server;
const char *proxy;
/* make C compiler happy */
(void) data;
server = (struct t_irc_server *)pointer;
proxy = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PROXY);
server->hook_connect = NULL;
server->sock = sock;
switch (status)
{
case WEECHAT_HOOK_CONNECT_OK:
/* set IP */
if (server->current_ip)
free (server->current_ip);
server->current_ip = (ip_address) ? strdup (ip_address) : NULL;
weechat_printf (
server->buffer,
_("%s%s: connected to %s/%d (%s)"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(server->current_ip) ? server->current_ip : "?");
server->hook_fd = weechat_hook_fd (server->sock,
1, 0, 0,
&irc_server_recv_cb,
server, NULL);
/* login to server */
irc_server_login (server);
break;
case WEECHAT_HOOK_CONNECT_ADDRESS_NOT_FOUND:
weechat_printf (
server->buffer,
(proxy && proxy[0]) ?
_("%s%s: proxy address \"%s\" not found") :
_("%s%s: address \"%s\" not found"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
server->current_address);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_IP_ADDRESS_NOT_FOUND:
weechat_printf (
server->buffer,
(proxy && proxy[0]) ?
_("%s%s: proxy IP address not found") :
_("%s%s: IP address not found"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_CONNECTION_REFUSED:
weechat_printf (
server->buffer,
(proxy && proxy[0]) ?
_("%s%s: proxy connection refused") :
_("%s%s: connection refused"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_PROXY_ERROR:
weechat_printf (
server->buffer,
_("%s%s: proxy fails to establish connection to server (check "
"username/password if used and if server address/port is "
"allowed by proxy)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_LOCAL_HOSTNAME_ERROR:
weechat_printf (
server->buffer,
_("%s%s: unable to set local hostname/IP"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_reconnect_schedule (server);
break;
case WEECHAT_HOOK_CONNECT_GNUTLS_INIT_ERROR:
weechat_printf (
server->buffer,
_("%s%s: TLS init error"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_reconnect_schedule (server);
break;
case WEECHAT_HOOK_CONNECT_GNUTLS_HANDSHAKE_ERROR:
weechat_printf (
server->buffer,
_("%s%s: TLS handshake failed"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
#ifdef HAVE_GNUTLS
if (gnutls_rc == GNUTLS_E_DH_PRIME_UNACCEPTABLE)
{
weechat_printf (
server->buffer,
_("%s%s: you should play with option "
"irc.server.%s.ssl_dhkey_size (current value is %d, try "
"a lower value like %d or %d)"),
weechat_prefix ("error"),
IRC_PLUGIN_NAME,
server->name,
IRC_SERVER_OPTION_INTEGER (
server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE),
IRC_SERVER_OPTION_INTEGER (
server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE) / 2,
IRC_SERVER_OPTION_INTEGER (
server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE) / 4);
}
#else
(void) gnutls_rc;
#endif /* HAVE_GNUTLS */
irc_server_close_connection (server);
server->current_retry++;
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_MEMORY_ERROR:
weechat_printf (
server->buffer,
_("%s%s: not enough memory (%s)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
(error) ? error : "-");
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_reconnect_schedule (server);
break;
case WEECHAT_HOOK_CONNECT_TIMEOUT:
weechat_printf (
server->buffer,
_("%s%s: timeout"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_SOCKET_ERROR:
weechat_printf (
server->buffer,
_("%s%s: unable to create socket"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_reconnect_schedule (server);
break;
}
return WEECHAT_RC_OK;
}
/*
* Sets the title for a server buffer.
*/
void
irc_server_set_buffer_title (struct t_irc_server *server)
{
char *title;
int length;
if (server && server->buffer)
{
if (server->is_connected)
{
length = 16 +
((server->current_address) ? strlen (server->current_address) : 16) +
16 + ((server->current_ip) ? strlen (server->current_ip) : 16) + 1;
title = malloc (length);
if (title)
{
snprintf (title, length, "IRC: %s/%d (%s)",
server->current_address,
server->current_port,
(server->current_ip) ? server->current_ip : "");
weechat_buffer_set (server->buffer, "title", title);
free (title);
}
}
else
{
weechat_buffer_set (server->buffer, "title", "");
}
}
}
/*
* Creates a buffer for a server.
*
* Returns pointer to buffer, NULL if error.
*/
struct t_gui_buffer *
irc_server_create_buffer (struct t_irc_server *server)
{
char buffer_name[256], charset_modifier[256];
struct t_gui_buffer *ptr_buffer_for_merge;
ptr_buffer_for_merge = NULL;
switch (weechat_config_integer (irc_config_look_server_buffer))
{
case IRC_CONFIG_LOOK_SERVER_BUFFER_MERGE_WITH_CORE:
/* merge with WeeChat core buffer */
ptr_buffer_for_merge = weechat_buffer_search_main ();
break;
case IRC_CONFIG_LOOK_SERVER_BUFFER_MERGE_WITHOUT_CORE:
/* find buffer used to merge all IRC server buffers */
ptr_buffer_for_merge = irc_buffer_search_server_lowest_number ();
break;
}
snprintf (buffer_name, sizeof (buffer_name),
"server.%s", server->name);
server->buffer = weechat_buffer_new (buffer_name,
&irc_input_data_cb, NULL, NULL,
&irc_buffer_close_cb, NULL, NULL);
if (!server->buffer)
return NULL;
if (!weechat_buffer_get_integer (server->buffer, "short_name_is_set"))
weechat_buffer_set (server->buffer, "short_name", server->name);
weechat_buffer_set (server->buffer, "localvar_set_type", "server");
weechat_buffer_set (server->buffer, "localvar_set_server", server->name);
weechat_buffer_set (server->buffer, "localvar_set_channel", server->name);
snprintf (charset_modifier, sizeof (charset_modifier),
"irc.%s", server->name);
weechat_buffer_set (server->buffer, "localvar_set_charset_modifier",
charset_modifier);
(void) weechat_hook_signal_send ("logger_backlog",
WEECHAT_HOOK_SIGNAL_POINTER,
server->buffer);
if (weechat_config_boolean (irc_config_network_send_unknown_commands))
weechat_buffer_set (server->buffer, "input_get_unknown_commands", "1");
/* set highlights settings on server buffer */
weechat_buffer_set (server->buffer, "highlight_words_add",
weechat_config_string (irc_config_look_highlight_server));
if (weechat_config_string (irc_config_look_highlight_tags_restrict)
&& weechat_config_string (irc_config_look_highlight_tags_restrict)[0])
{
weechat_buffer_set (
server->buffer, "highlight_tags_restrict",
weechat_config_string (irc_config_look_highlight_tags_restrict));
}
irc_server_set_buffer_title (server);
/*
* merge buffer if needed: if merge with(out) core set, and if no layout
* number is assigned for this buffer (if layout number is assigned, then
* buffer was already moved/merged by WeeChat core)
*/
if (ptr_buffer_for_merge
&& (weechat_buffer_get_integer (server->buffer, "layout_number") < 1))
{
weechat_buffer_merge (server->buffer, ptr_buffer_for_merge);
}
(void) weechat_hook_signal_send ("irc_server_opened",
WEECHAT_HOOK_SIGNAL_POINTER,
server->buffer);
return server->buffer;
}
/*
* Searches for a fingerprint digest algorithm with the size (in bits).
*
* Returns index of algo in enum t_irc_fingerprint_digest_algo,
* -1 if not found.
*/
#ifdef HAVE_GNUTLS
int
irc_server_fingerprint_search_algo_with_size (int size)
{
int i;
for (i = 0; i < IRC_FINGERPRINT_NUM_ALGOS; i++)
{
if (irc_fingerprint_digest_algos_size[i] == size)
return i;
}
/* digest algorithm not found */
return -1;
}
#endif /* HAVE_GNUTLS */
/*
* Returns a string with sizes of allowed fingerprint,
* in number of hexadecimal digits (== bits / 4).
*
* Example of output: "128=SHA-512, 64=SHA-256, 40=SHA-1".
*
* Note: result must be freed after use.
*/
#ifdef HAVE_GNUTLS
char *
irc_server_fingerprint_str_sizes ()
{
char str_sizes[1024], str_one_size[128];
int i;
str_sizes[0] = '\0';
for (i = IRC_FINGERPRINT_NUM_ALGOS - 1; i >= 0; i--)
{
snprintf (str_one_size, sizeof (str_one_size),
"%d=%s%s",
irc_fingerprint_digest_algos_size[i] / 4,
irc_fingerprint_digest_algos_name[i],
(i > 0) ? ", " : "");
strcat (str_sizes, str_one_size);
}
return strdup (str_sizes);
}
#endif /* HAVE_GNUTLS */
/*
* Compares two fingerprints: one hexadecimal (given by user), the second binary
* (received from IRC server).
*
* Returns:
* 0: fingerprints are the same
* -1: fingerprints are different
*/
#ifdef HAVE_GNUTLS
int
irc_server_compare_fingerprints (const char *fingerprint,
const unsigned char *fingerprint_server,
ssize_t fingerprint_size)
{
ssize_t i;
unsigned int value;
if ((ssize_t)strlen (fingerprint) != fingerprint_size * 2)
return -1;
for (i = 0; i < fingerprint_size; i++)
{
if (sscanf (&fingerprint[i * 2], "%02x", &value) != 1)
return -1;
if (value != fingerprint_server[i])
return -1;
}
/* fingerprints are the same */
return 0;
}
#endif /* HAVE_GNUTLS */
/*
* Checks if a GnuTLS session uses the certificate with a given fingerprint.
*
* Returns:
* 1: certificate has the good fingerprint
* 0: certificate does NOT have the good fingerprint
*/
#ifdef HAVE_GNUTLS
int
irc_server_check_certificate_fingerprint (struct t_irc_server *server,
gnutls_x509_crt_t certificate,
const char *good_fingerprints)
{
unsigned char *fingerprint_server[IRC_FINGERPRINT_NUM_ALGOS];
char **fingerprints;
int i, rc, algo;
size_t size_bits, size_bytes;
for (i = 0; i < IRC_FINGERPRINT_NUM_ALGOS; i++)
{
fingerprint_server[i] = NULL;
}
/* split good_fingerprints */
fingerprints = weechat_string_split (good_fingerprints, ",", NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0, NULL);
if (!fingerprints)
return 0;
rc = 0;
for (i = 0; fingerprints[i]; i++)
{
size_bits = strlen (fingerprints[i]) * 4;
size_bytes = size_bits / 8;
algo = irc_server_fingerprint_search_algo_with_size (size_bits);
if (algo < 0)
continue;
if (!fingerprint_server[algo])
{
fingerprint_server[algo] = malloc (size_bytes);
if (fingerprint_server[algo])
{
/* calculate the fingerprint for the certificate */
if (gnutls_x509_crt_get_fingerprint (
certificate,
irc_fingerprint_digest_algos[algo],
fingerprint_server[algo],
&size_bytes) != GNUTLS_E_SUCCESS)
{
weechat_printf (
server->buffer,
_("%sgnutls: failed to calculate certificate "
"fingerprint (%s)"),
weechat_prefix ("error"),
irc_fingerprint_digest_algos_name[algo]);
free (fingerprint_server[algo]);
fingerprint_server[algo] = NULL;
}
}
else
{
weechat_printf (
server->buffer,
_("%s%s: not enough memory (%s)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
"fingerprint");
}
}
if (fingerprint_server[algo])
{
/* check if the fingerprint matches */
if (irc_server_compare_fingerprints (fingerprints[i],
fingerprint_server[algo],
size_bytes) == 0)
{
rc = 1;
break;
}
}
}
weechat_string_free_split (fingerprints);
for (i = 0; i < IRC_FINGERPRINT_NUM_ALGOS; i++)
{
if (fingerprint_server[i])
free (fingerprint_server[i]);
}
return rc;
}
#endif /* HAVE_GNUTLS */
/*
* GnuTLS callback called during handshake.
*
* Returns:
* 0: certificate OK
* -1: error in certificate
*/
#ifdef HAVE_GNUTLS
int
irc_server_gnutls_callback (const void *pointer, void *data,
gnutls_session_t tls_session,
const gnutls_datum_t *req_ca, int nreq,
const gnutls_pk_algorithm_t *pk_algos,
int pk_algos_len,
#if LIBGNUTLS_VERSION_NUMBER >= 0x020b00 /* 2.11.0 */
gnutls_retr2_st *answer,
#else
gnutls_retr_st *answer,
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x020b00 */
int action)
{
struct t_irc_server *server;
#if LIBGNUTLS_VERSION_NUMBER >= 0x020b00 /* 2.11.0 */
gnutls_retr2_st tls_struct;
#else
gnutls_retr_st tls_struct;
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x020b00 */
gnutls_x509_crt_t cert_temp;
const gnutls_datum_t *cert_list;
gnutls_datum_t filedatum;
unsigned int i, cert_list_len, status;
time_t cert_time;
char *cert_path0, *cert_path1, *cert_path2, *cert_str, *fingerprint_eval;
char *weechat_dir, *ssl_password;
const char *ptr_fingerprint;
int rc, ret, fingerprint_match, hostname_match, cert_temp_init;
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* 1.7.6 */
gnutls_datum_t cinfo;
int rinfo;
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x010706 */
/* make C compiler happy */
(void) data;
(void) req_ca;
(void) nreq;
(void) pk_algos;
(void) pk_algos_len;
rc = 0;
if (!pointer)
return -1;
server = (struct t_irc_server *) pointer;
cert_temp_init = 0;
cert_list = NULL;
cert_list_len = 0;
fingerprint_eval = NULL;
weechat_dir = NULL;
if (action == WEECHAT_HOOK_CONNECT_GNUTLS_CB_VERIFY_CERT)
{
weechat_printf (
server->buffer,
_("%sgnutls: connected using %d-bit Diffie-Hellman shared secret "
"exchange"),
weechat_prefix ("network"),
IRC_SERVER_OPTION_INTEGER (server,
IRC_SERVER_OPTION_SSL_DHKEY_SIZE));
/* initialize the certificate structure */
if (gnutls_x509_crt_init (&cert_temp) != GNUTLS_E_SUCCESS)
{
weechat_printf (
server->buffer,
_("%sgnutls: failed to initialize certificate structure"),
weechat_prefix ("error"));
rc = -1;
goto end;
}
/* flag to do the "deinit" (at the end of function) */
cert_temp_init = 1;
/* get fingerprint option in server */
ptr_fingerprint = IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_FINGERPRINT);
fingerprint_eval = irc_server_eval_fingerprint (server);
if (!fingerprint_eval)
{
rc = -1;
goto end;
}
/* set match options */
fingerprint_match = (ptr_fingerprint && ptr_fingerprint[0]) ? 0 : 1;
hostname_match = 0;
/* get the peer's raw certificate (chain) as sent by the peer */
cert_list = gnutls_certificate_get_peers (tls_session, &cert_list_len);
if (cert_list)
{
weechat_printf (
server->buffer,
NG_("%sgnutls: receiving %d certificate",
"%sgnutls: receiving %d certificates",
cert_list_len),
weechat_prefix ("network"),
cert_list_len);
for (i = 0; i < cert_list_len; i++)
{
if (gnutls_x509_crt_import (cert_temp,
&cert_list[i],
GNUTLS_X509_FMT_DER) != GNUTLS_E_SUCCESS)
{
weechat_printf (
server->buffer,
_("%sgnutls: failed to import certificate[%d]"),
weechat_prefix ("error"), i + 1);
rc = -1;
goto end;
}
/* checks on first certificate received */
if (i == 0)
{
/* check if fingerprint matches the first certificate */
if (fingerprint_eval && fingerprint_eval[0])
{
fingerprint_match = irc_server_check_certificate_fingerprint (
server, cert_temp, fingerprint_eval);
}
/* check if hostname matches in the first certificate */
if (gnutls_x509_crt_check_hostname (cert_temp,
server->current_address) != 0)
{
hostname_match = 1;
}
}
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* 1.7.6 */
/* display infos about certificate */
#if LIBGNUTLS_VERSION_NUMBER < 0x020400 /* 2.4.0 */
rinfo = gnutls_x509_crt_print (cert_temp,
GNUTLS_X509_CRT_ONELINE, &cinfo);
#else
rinfo = gnutls_x509_crt_print (cert_temp,
GNUTLS_CRT_PRINT_ONELINE, &cinfo);
#endif /* LIBGNUTLS_VERSION_NUMBER < 0x020400 */
if (rinfo == 0)
{
weechat_printf (
server->buffer,
_("%s - certificate[%d] info:"),
weechat_prefix ("network"), i + 1);
weechat_printf (
server->buffer,
"%s - %s",
weechat_prefix ("network"), cinfo.data);
gnutls_free (cinfo.data);
}
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x010706 */
/* check dates, only if fingerprint is not set */
if (!ptr_fingerprint || !ptr_fingerprint[0])
{
/* check expiration date */
cert_time = gnutls_x509_crt_get_expiration_time (cert_temp);
if (cert_time < time (NULL))
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate has expired"),
weechat_prefix ("error"));
rc = -1;
}
/* check activation date */
cert_time = gnutls_x509_crt_get_activation_time (cert_temp);
if (cert_time > time (NULL))
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate is not yet activated"),
weechat_prefix ("error"));
rc = -1;
}
}
}
/*
* if fingerprint is set, display if matches, and don't check
* anything else
*/
if (ptr_fingerprint && ptr_fingerprint[0])
{
if (fingerprint_match)
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate fingerprint matches"),
weechat_prefix ("network"));
}
else
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate fingerprint does NOT match "
"(check value of option "
"irc.server.%s.ssl_fingerprint)"),
weechat_prefix ("error"), server->name);
rc = -1;
}
goto end;
}
if (!hostname_match)
{
weechat_printf (
server->buffer,
_("%sgnutls: the hostname in the certificate does NOT "
"match \"%s\""),
weechat_prefix ("error"), server->current_address);
rc = -1;
}
}
/* verify the peer’s certificate */
if (gnutls_certificate_verify_peers2 (tls_session, &status) < 0)
{
weechat_printf (
server->buffer,
_("%sgnutls: error while checking peer's certificate"),
weechat_prefix ("error"));
rc = -1;
goto end;
}
/* check if certificate is trusted */
if (status & GNUTLS_CERT_INVALID)
{
weechat_printf (
server->buffer,
_("%sgnutls: peer's certificate is NOT trusted"),
weechat_prefix ("error"));
rc = -1;
}
else
{
weechat_printf (
server->buffer,
_("%sgnutls: peer's certificate is trusted"),
weechat_prefix ("network"));
}
/* check if certificate issuer is known */
if (status & GNUTLS_CERT_SIGNER_NOT_FOUND)
{
weechat_printf (
server->buffer,
_("%sgnutls: peer's certificate issuer is unknown"),
weechat_prefix ("error"));
rc = -1;
}
/* check that certificate is not revoked */
if (status & GNUTLS_CERT_REVOKED)
{
weechat_printf (
server->buffer,
_("%sgnutls: the certificate has been revoked"),
weechat_prefix ("error"));
rc = -1;
}
}
else if (action == WEECHAT_HOOK_CONNECT_GNUTLS_CB_SET_CERT)
{
/* using client certificate if it exists */
cert_path0 = (char *) IRC_SERVER_OPTION_STRING(
server, IRC_SERVER_OPTION_SSL_CERT);
if (cert_path0 && cert_path0[0])
{
weechat_dir = weechat_info_get ("weechat_dir", "");
cert_path1 = weechat_string_replace (cert_path0, "%h", weechat_dir);
cert_path2 = (cert_path1) ?
weechat_string_expand_home (cert_path1) : NULL;
if (cert_path2)
{
cert_str = weechat_file_get_content (cert_path2);
if (cert_str)
{
weechat_printf (
server->buffer,
_("%sgnutls: sending one certificate"),
weechat_prefix ("network"));
filedatum.data = (unsigned char *) cert_str;
filedatum.size = strlen (cert_str);
/* certificate */
gnutls_x509_crt_init (&server->tls_cert);
gnutls_x509_crt_import (server->tls_cert, &filedatum,
GNUTLS_X509_FMT_PEM);
/* key password */
ssl_password = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_PASSWORD));
/* key */
gnutls_x509_privkey_init (&server->tls_cert_key);
/*
* gnutls_x509_privkey_import2 has no "Since: ..." in GnuTLS manual but
* GnuTLS NEWS file lists it being added in 3.1.0:
* https://gitlab.com/gnutls/gnutls/blob/2b715b9564681acb3008a5574dcf25464de8b038/NEWS#L2552
*/
#if LIBGNUTLS_VERSION_NUMBER >= 0x030100 /* 3.1.0 */
ret = gnutls_x509_privkey_import2 (server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM,
ssl_password,
0);
#else
ret = gnutls_x509_privkey_import (server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM);
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x0301000 */
if (ret < 0)
{
ret = gnutls_x509_privkey_import_pkcs8 (
server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM,
ssl_password,
GNUTLS_PKCS_PLAIN);
}
if (ret < 0)
{
weechat_printf (
server->buffer,
_("%sgnutls: invalid certificate \"%s\", error: "
"%s"),
weechat_prefix ("error"), cert_path2,
gnutls_strerror (ret));
rc = -1;
}
else
{
#if LIBGNUTLS_VERSION_NUMBER >= 0x020b00 /* 2.11.0 */
tls_struct.cert_type = GNUTLS_CRT_X509;
tls_struct.key_type = GNUTLS_PRIVKEY_X509;
#else
tls_struct.type = GNUTLS_CRT_X509;
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x020b00 */
tls_struct.ncerts = 1;
tls_struct.deinit_all = 0;
tls_struct.cert.x509 = &server->tls_cert;
tls_struct.key.x509 = server->tls_cert_key;
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* 1.7.6 */
/* client certificate info */
#if LIBGNUTLS_VERSION_NUMBER < 0x020400 /* 2.4.0 */
rinfo = gnutls_x509_crt_print (server->tls_cert,
GNUTLS_X509_CRT_ONELINE,
&cinfo);
#else
rinfo = gnutls_x509_crt_print (server->tls_cert,
GNUTLS_CRT_PRINT_ONELINE,
&cinfo);
#endif /* LIBGNUTLS_VERSION_NUMBER < 0x020400 */
if (rinfo == 0)
{
weechat_printf (
server->buffer,
_("%s - client certificate info (%s):"),
weechat_prefix ("network"), cert_path2);
weechat_printf (
server->buffer, "%s - %s",
weechat_prefix ("network"), cinfo.data);
gnutls_free (cinfo.data);
}
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x010706 */
memcpy (answer, &tls_struct, sizeof (tls_struct));
free (cert_str);
}
if (ssl_password)
free (ssl_password);
}
else
{
weechat_printf (
server->buffer,
_("%sgnutls: unable to read certificate \"%s\""),
weechat_prefix ("error"), cert_path2);
}
}
if (cert_path1)
free (cert_path1);
if (cert_path2)
free (cert_path2);
}
}
end:
/* an error should stop the handshake unless the user doesn't care */
if ((rc == -1)
&& (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL_VERIFY) == 0))
{
rc = 0;
}
if (cert_temp_init)
gnutls_x509_crt_deinit (cert_temp);
if (weechat_dir)
free (weechat_dir);
if (fingerprint_eval)
free (fingerprint_eval);
return rc;
}
#endif /* HAVE_GNUTLS */
/*
* Connects to a server.
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_connect (struct t_irc_server *server)
{
int length;
char *option_name;
struct t_config_option *proxy_type, *proxy_ipv6, *proxy_address;
struct t_config_option *proxy_port;
const char *proxy, *str_proxy_type, *str_proxy_address;
server->disconnected = 0;
if (!server->buffer)
{
if (!irc_server_create_buffer (server))
return 0;
weechat_buffer_set (server->buffer, "display", "auto");
}
irc_bar_item_update_channel ();
irc_server_set_index_current_address (server,
server->index_current_address);
if (!server->current_address)
{
weechat_printf (
server->buffer,
_("%s%s: unknown address for server \"%s\", cannot connect"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, server->name);
return 0;
}
/* free some old values (from a previous connection to server) */
if (server->isupport)
{
free (server->isupport);
server->isupport = NULL;
}
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
proxy_type = NULL;
proxy_ipv6 = NULL;
proxy_address = NULL;
proxy_port = NULL;
str_proxy_type = NULL;
str_proxy_address = NULL;
proxy = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PROXY);
if (proxy && proxy[0])
{
length = 32 + strlen (proxy) + 1;
option_name = malloc (length);
if (!option_name)
{
weechat_printf (
server->buffer,
_("%s%s: not enough memory (%s)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
"proxy");
return 0;
}
snprintf (option_name, length, "weechat.proxy.%s.type", proxy);
proxy_type = weechat_config_get (option_name);
snprintf (option_name, length, "weechat.proxy.%s.ipv6", proxy);
proxy_ipv6 = weechat_config_get (option_name);
snprintf (option_name, length, "weechat.proxy.%s.address", proxy);
proxy_address = weechat_config_get (option_name);
snprintf (option_name, length, "weechat.proxy.%s.port", proxy);
proxy_port = weechat_config_get (option_name);
free (option_name);
if (!proxy_type || !proxy_address)
{
weechat_printf (
server->buffer,
_("%s%s: proxy \"%s\" not found for server \"%s\", cannot "
"connect"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, proxy, server->name);
return 0;
}
str_proxy_type = weechat_config_string (proxy_type);
str_proxy_address = weechat_config_string (proxy_address);
if (!str_proxy_type[0] || !proxy_ipv6 || !str_proxy_address[0]
|| !proxy_port)
{
weechat_printf (
server->buffer,
_("%s%s: missing proxy settings, check options for proxy "
"\"%s\""),
weechat_prefix ("error"), IRC_PLUGIN_NAME, proxy);
return 0;
}
}
if (!server->nicks_array)
{
weechat_printf (
server->buffer,
_("%s%s: nicks not defined for server \"%s\", cannot connect"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, server->name);
return 0;
}
#ifndef HAVE_GNUTLS
if (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL))
{
weechat_printf (
server->buffer,
_("%s%s: cannot connect with SSL because WeeChat was not built "
"with GnuTLS support"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return 0;
}
#endif /* HAVE_GNUTLS */
if (proxy_type)
{
weechat_printf (
server->buffer,
_("%s%s: connecting to server %s/%d%s via %s proxy %s/%d%s..."),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "",
str_proxy_type,
str_proxy_address,
weechat_config_integer (proxy_port),
(weechat_config_boolean (proxy_ipv6)) ? " (IPv6)" : "");
weechat_log_printf (
_("Connecting to server %s/%d%s via %s proxy %s/%d%s..."),
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "",
str_proxy_type,
str_proxy_address,
weechat_config_integer (proxy_port),
(weechat_config_boolean (proxy_ipv6)) ? " (IPv6)" : "");
}
else
{
weechat_printf (
server->buffer,
_("%s%s: connecting to server %s/%d%s..."),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "");
weechat_log_printf (
_("%s%s: connecting to server %s/%d%s..."),
"",
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "");
}
/* close connection if opened */
irc_server_close_connection (server);
/* open auto-joined channels now (if needed) */
if (weechat_config_boolean (irc_config_look_buffer_open_before_autojoin)
&& !server->disable_autojoin)
{
irc_server_autojoin_create_buffers (server);
}
/* init SSL if asked and connect */
server->ssl_connected = 0;
#ifdef HAVE_GNUTLS
if (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL))
server->ssl_connected = 1;
server->hook_connect = weechat_hook_connect (
proxy,
server->current_address,
server->current_port,
proxy_type ? weechat_config_integer (proxy_ipv6) : IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_IPV6),
server->current_retry,
(server->ssl_connected) ? &server->gnutls_sess : NULL,
(server->ssl_connected) ? &irc_server_gnutls_callback : NULL,
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE),
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_PRIORITIES),
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_LOCAL_HOSTNAME),
&irc_server_connect_cb,
server,
NULL);
#else
server->hook_connect = weechat_hook_connect (
proxy,
server->current_address,
server->current_port,
proxy_type ? weechat_config_integer (proxy_ipv6) : IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_IPV6),
server->current_retry,
NULL, NULL, 0, NULL,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_LOCAL_HOSTNAME),
&irc_server_connect_cb,
server,
NULL);
#endif /* HAVE_GNUTLS */
/* send signal "irc_server_connecting" with server name */
(void) weechat_hook_signal_send ("irc_server_connecting",
WEECHAT_HOOK_SIGNAL_STRING, server->name);
return 1;
}
/*
* Reconnects to a server (after disconnection).
*/
void
irc_server_reconnect (struct t_irc_server *server)
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server..."),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
server->reconnect_start = 0;
if (irc_server_connect (server))
server->reconnect_join = 1;
else
irc_server_reconnect_schedule (server);
}
/*
* Callback for auto-connect to servers (called at startup).
*/
int
irc_server_auto_connect_timer_cb (const void *pointer, void *data,
int remaining_calls)
{
struct t_irc_server *ptr_server;
int auto_connect;
/* make C compiler happy */
(void) data;
(void) remaining_calls;
auto_connect = (pointer) ? 1 : 0;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if ((auto_connect || ptr_server->temp_server)
&& (IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTOCONNECT)))
{
if (!irc_server_connect (ptr_server))
irc_server_reconnect_schedule (ptr_server);
}
}
return WEECHAT_RC_OK;
}
/*
* Auto-connects to servers (called at startup).
*
* If auto_connect == 1, auto-connects to all servers with flag "autoconnect".
* If auto_connect == 0, auto-connect to temporary servers only.
*/
void
irc_server_auto_connect (int auto_connect)
{
weechat_hook_timer (1, 0, 1,
&irc_server_auto_connect_timer_cb,
(auto_connect) ? (void *)1 : (void *)0,
NULL);
}
/*
* Disconnects from a server.
*/
void
irc_server_disconnect (struct t_irc_server *server, int switch_address,
int reconnect)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
/*
* remove all nicks and write disconnection message on each
* channel/private buffer
*/
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
irc_nick_free_all (server, ptr_channel);
if (ptr_channel->hook_autorejoin)
{
weechat_unhook (ptr_channel->hook_autorejoin);
ptr_channel->hook_autorejoin = NULL;
}
weechat_buffer_set (ptr_channel->buffer, "localvar_del_away", "");
weechat_printf (
ptr_channel->buffer,
_("%s%s: disconnected from server"),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
}
/* remove away status on server buffer */
weechat_buffer_set (server->buffer, "localvar_del_away", "");
}
irc_server_close_connection (server);
if (server->buffer)
{
weechat_printf (
server->buffer,
_("%s%s: disconnected from server"),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
}
server->current_retry = 0;
if (switch_address)
irc_server_switch_address (server, 0);
else
irc_server_set_index_current_address (server, 0);
if (server->nick_modes)
{
free (server->nick_modes);
server->nick_modes = NULL;
weechat_bar_item_update ("input_prompt");
weechat_bar_item_update ("irc_nick_modes");
}
if (server->host)
{
free (server->host);
server->host = NULL;
weechat_bar_item_update ("irc_host");
weechat_bar_item_update ("irc_nick_host");
}
server->checking_cap_ls = 0;
weechat_hashtable_remove_all (server->cap_ls);
server->checking_cap_list = 0;
weechat_hashtable_remove_all (server->cap_list);
server->is_away = 0;
server->away_time = 0;
server->lag = 0;
server->lag_displayed = -1;
server->lag_check_time.tv_sec = 0;
server->lag_check_time.tv_usec = 0;
server->lag_next_check = time (NULL) +
weechat_config_integer (irc_config_network_lag_check);
server->lag_last_refresh = 0;
irc_server_set_lag (server);
server->monitor = 0;
server->monitor_time = 0;
if (reconnect
&& IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTORECONNECT))
irc_server_reconnect_schedule (server);
else
{
server->reconnect_delay = 0;
server->reconnect_start = 0;
}
/* discard current nick if no reconnection asked */
if (!reconnect && server->nick)
irc_server_set_nick (server, NULL);
irc_server_set_buffer_title (server);
server->disconnected = 1;
/* send signal "irc_server_disconnected" with server name */
(void) weechat_hook_signal_send ("irc_server_disconnected",
WEECHAT_HOOK_SIGNAL_STRING, server->name);
}
/*
* Disconnects from all servers.
*/
void
irc_server_disconnect_all ()
{
struct t_irc_server *ptr_server;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
irc_server_disconnect (ptr_server, 0, 0);
}
}
/*
* Creates buffers for auto-joined channels on a server.
*/
void
irc_server_autojoin_create_buffers (struct t_irc_server *server)
{
const char *pos_space;
char *autojoin, *autojoin2, **channels;
int num_channels, i;
/* buffers are opened only if no channels are currently opened */
if (server->channels)
return;
/* evaluate server option "autojoin" */
autojoin = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_AUTOJOIN));
/* extract channel names from autojoin option */
if (autojoin && autojoin[0])
{
pos_space = strchr (autojoin, ' ');
autojoin2 = (pos_space) ?
weechat_strndup (autojoin, pos_space - autojoin) :
strdup (autojoin);
if (autojoin2)
{
channels = weechat_string_split (
autojoin2,
",",
NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0,
&num_channels);
if (channels)
{
for (i = 0; i < num_channels; i++)
{
irc_channel_create_buffer (
server, IRC_CHANNEL_TYPE_CHANNEL, channels[i],
1, 1);
}
weechat_string_free_split (channels);
}
free (autojoin2);
}
}
if (autojoin)
free (autojoin);
}
/*
* Autojoins (or auto-rejoins) channels.
*/
void
irc_server_autojoin_channels (struct t_irc_server *server)
{
struct t_irc_channel *ptr_channel;
char *autojoin;
/* auto-join after disconnection (only rejoins opened channels) */
if (!server->disable_autojoin && server->reconnect_join && server->channels)
{
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if ((ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
&& !ptr_channel->part)
{
if (ptr_channel->key)
{
irc_server_sendf (server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"JOIN %s %s",
ptr_channel->name, ptr_channel->key);
}
else
{
irc_server_sendf (server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"JOIN %s",
ptr_channel->name);
}
}
}
server->reconnect_join = 0;
}
else
{
/* auto-join when connecting to server for first time */
autojoin = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_AUTOJOIN));
if (!server->disable_autojoin && autojoin && autojoin[0])
irc_command_join_server (server, autojoin, 0, 0);
if (autojoin)
free (autojoin);
}
server->disable_autojoin = 0;
}
/*
* Returns number of channels for server.
*/
int
irc_server_get_channel_count (struct t_irc_server *server)
{
int count;
struct t_irc_channel *ptr_channel;
count = 0;
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
count++;
}
return count;
}
/*
* Returns number of pv for server.
*/
int
irc_server_get_pv_count (struct t_irc_server *server)
{
int count;
struct t_irc_channel *ptr_channel;
count = 0;
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE)
count++;
}
return count;
}
/*
* Removes away for all channels/nicks (for all servers).
*/
void
irc_server_remove_away (struct t_irc_server *server)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
irc_channel_remove_away (server, ptr_channel);
}
server->last_away_check = 0;
}
}
/*
* Checks for away on all channels of a server.
*/
void
irc_server_check_away (struct t_irc_server *server)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
irc_channel_check_whox (server, ptr_channel);
}
server->last_away_check = time (NULL);
}
}
/*
* Sets/unsets away status for a server (all channels).
*/
void
irc_server_set_away (struct t_irc_server *server, const char *nick, int is_away)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
/* set/del "away" local variable on server buffer */
if (is_away)
{
weechat_buffer_set (server->buffer,
"localvar_set_away", server->away_message);
}
else
{
weechat_buffer_set (server->buffer,
"localvar_del_away", "");
}
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
/* set away flag for nick on channel */
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
irc_channel_set_away (server, ptr_channel, nick, is_away);
/* set/del "away" local variable on channel buffer */
if (is_away)
{
weechat_buffer_set (ptr_channel->buffer,
"localvar_set_away", server->away_message);
}
else
{
weechat_buffer_set (ptr_channel->buffer,
"localvar_del_away", "");
}
}
}
}
/*
* Callback called when user sends (file or chat) to someone and that xfer
* plugin successfully initialized xfer and is ready for sending.
*
* In that case, irc plugin sends message to remote nick and wait for "accept"
* reply.
*/
int
irc_server_xfer_send_ready_cb (const void *pointer, void *data,
const char *signal,
const char *type_data, void *signal_data)
{
struct t_infolist *infolist;
struct t_irc_server *ptr_server;
const char *plugin_name, *plugin_id, *type, *filename, *local_address;
char converted_addr[NI_MAXHOST];
struct addrinfo *ainfo;
struct sockaddr_in *saddr;
int spaces_in_name, rc;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) signal;
(void) type_data;
infolist = (struct t_infolist *)signal_data;
if (weechat_infolist_next (infolist))
{
plugin_name = weechat_infolist_string (infolist, "plugin_name");
plugin_id = weechat_infolist_string (infolist, "plugin_id");
if (plugin_name && (strcmp (plugin_name, IRC_PLUGIN_NAME) == 0)
&& plugin_id)
{
ptr_server = irc_server_search (plugin_id);
if (ptr_server)
{
converted_addr[0] = '\0';
local_address = weechat_infolist_string (infolist,
"local_address");
if (local_address)
{
res_init ();
rc = getaddrinfo (local_address, NULL, NULL, &ainfo);
if ((rc == 0) && ainfo && ainfo->ai_addr)
{
if (ainfo->ai_family == AF_INET)
{
/* transform dotted 4 IP address to ulong string */
saddr = (struct sockaddr_in *)ainfo->ai_addr;
snprintf (converted_addr, sizeof (converted_addr),
"%lu",
(unsigned long)ntohl (saddr->sin_addr.s_addr));
}
else
{
snprintf (converted_addr, sizeof (converted_addr),
"%s", local_address);
}
}
}
type = weechat_infolist_string (infolist, "type_string");
if (type && converted_addr[0])
{
/* send DCC PRIVMSG */
if (strcmp (type, "file_send") == 0)
{
filename = weechat_infolist_string (infolist, "filename");
spaces_in_name = (strchr (filename, ' ') != NULL);
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC SEND %s%s%s "
"%s %d %s\01",
weechat_infolist_string (infolist, "remote_nick"),
(spaces_in_name) ? "\"" : "",
filename,
(spaces_in_name) ? "\"" : "",
converted_addr,
weechat_infolist_integer (infolist, "port"),
weechat_infolist_string (infolist, "size"));
}
else if (strcmp (type, "chat_send") == 0)
{
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC CHAT chat %s %d\01",
weechat_infolist_string (infolist, "remote_nick"),
converted_addr,
weechat_infolist_integer (infolist, "port"));
}
}
}
}
}
weechat_infolist_reset_item_cursor (infolist);
return WEECHAT_RC_OK;
}
/*
* Callback called when user receives a file and that resume is possible (file
* is partially received).
*
* In that case, irc plugin sends message to remote nick with resume position.
*/
int
irc_server_xfer_resume_ready_cb (const void *pointer, void *data,
const char *signal,
const char *type_data, void *signal_data)
{
struct t_infolist *infolist;
struct t_irc_server *ptr_server;
const char *plugin_name, *plugin_id, *filename;
int spaces_in_name;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) signal;
(void) type_data;
infolist = (struct t_infolist *)signal_data;
if (weechat_infolist_next (infolist))
{
plugin_name = weechat_infolist_string (infolist, "plugin_name");
plugin_id = weechat_infolist_string (infolist, "plugin_id");
if (plugin_name && (strcmp (plugin_name, IRC_PLUGIN_NAME) == 0) && plugin_id)
{
ptr_server = irc_server_search (plugin_id);
if (ptr_server)
{
filename = weechat_infolist_string (infolist, "filename");
spaces_in_name = (strchr (filename, ' ') != NULL);
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC RESUME %s%s%s %d %s\01",
weechat_infolist_string (infolist, "remote_nick"),
(spaces_in_name) ? "\"" : "",
filename,
(spaces_in_name) ? "\"" : "",
weechat_infolist_integer (infolist, "port"),
weechat_infolist_string (infolist, "start_resume"));
}
}
}
weechat_infolist_reset_item_cursor (infolist);
return WEECHAT_RC_OK;
}
/*
* Callback called when xfer plugin accepted resume request from receiver.
*
* In that case, irc plugin sends accept message to remote nick with resume
* position.
*/
int
irc_server_xfer_send_accept_resume_cb (const void *pointer, void *data,
const char *signal,
const char *type_data,
void *signal_data)
{
struct t_infolist *infolist;
struct t_irc_server *ptr_server;
const char *plugin_name, *plugin_id, *filename;
int spaces_in_name;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) signal;
(void) type_data;
infolist = (struct t_infolist *)signal_data;
if (weechat_infolist_next (infolist))
{
plugin_name = weechat_infolist_string (infolist, "plugin_name");
plugin_id = weechat_infolist_string (infolist, "plugin_id");
if (plugin_name && (strcmp (plugin_name, IRC_PLUGIN_NAME) == 0) && plugin_id)
{
ptr_server = irc_server_search (plugin_id);
if (ptr_server)
{
filename = weechat_infolist_string (infolist, "filename");
spaces_in_name = (strchr (filename, ' ') != NULL);
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC ACCEPT %s%s%s %d %s\01",
weechat_infolist_string (infolist, "remote_nick"),
(spaces_in_name) ? "\"" : "",
filename,
(spaces_in_name) ? "\"" : "",
weechat_infolist_integer (infolist, "port"),
weechat_infolist_string (infolist, "start_resume"));
}
}
}
weechat_infolist_reset_item_cursor (infolist);
return WEECHAT_RC_OK;
}
/*
* Returns hdata for server.
*/
struct t_hdata *
irc_server_hdata_server_cb (const void *pointer, void *data,
const char *hdata_name)
{
struct t_hdata *hdata;
/* make C compiler happy */
(void) pointer;
(void) data;
hdata = weechat_hdata_new (hdata_name, "prev_server", "next_server",
0, 0, NULL, NULL);
if (hdata)
{
WEECHAT_HDATA_VAR(struct t_irc_server, name, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, options, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, temp_server, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reloading_from_config, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reloaded_from_config, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_eval, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_array, STRING, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, ports_array, INTEGER, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, retry_array, INTEGER, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, index_current_address, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_address, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_ip, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_port, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_retry, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, sock, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, hook_connect, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_fd, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_timer_connection, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_timer_sasl, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, is_connected, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, ssl_connected, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, disconnected, INTEGER, 0, NULL, NULL);
#ifdef HAVE_GNUTLS
WEECHAT_HDATA_VAR(struct t_irc_server, gnutls_sess, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, tls_cert, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, tls_cert_key, OTHER, 0, NULL, NULL);
#endif /* HAVE_GNUTLS */
WEECHAT_HDATA_VAR(struct t_irc_server, unterminated_message, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nicks_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nicks_array, STRING, 0, "nicks_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_first_tried, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_alternate_number, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_modes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, host, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, checking_cap_ls, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cap_ls, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, checking_cap_list, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cap_list, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, isupport, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, prefix_modes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, prefix_chars, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, user_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, host_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, casemapping, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, chantypes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, chanmodes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, monitor, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, monitor_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_delay, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_start, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, command_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_join, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, disable_autojoin, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, is_away, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, away_message, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, away_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_displayed, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_check_time, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_next_check, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_last_refresh, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cmd_list_regexp, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_user_message, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_away_check, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_data_purge, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, outqueue, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_outqueue, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, redirects, POINTER, 0, NULL, "irc_redirect");
WEECHAT_HDATA_VAR(struct t_irc_server, last_redirect, POINTER, 0, NULL, "irc_redirect");
WEECHAT_HDATA_VAR(struct t_irc_server, notify_list, POINTER, 0, NULL, "irc_notify");
WEECHAT_HDATA_VAR(struct t_irc_server, last_notify, POINTER, 0, NULL, "irc_notify");
WEECHAT_HDATA_VAR(struct t_irc_server, notify_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_manual, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_channel_key, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_noswitch, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, buffer, POINTER, 0, NULL, "buffer");
WEECHAT_HDATA_VAR(struct t_irc_server, buffer_as_string, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, channels, POINTER, 0, NULL, "irc_channel");
WEECHAT_HDATA_VAR(struct t_irc_server, last_channel, POINTER, 0, NULL, "irc_channel");
WEECHAT_HDATA_VAR(struct t_irc_server, prev_server, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_VAR(struct t_irc_server, next_server, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_LIST(irc_servers, WEECHAT_HDATA_LIST_CHECK_POINTERS);
WEECHAT_HDATA_LIST(last_irc_server, 0);
}
return hdata;
}
/*
* Adds a server in an infolist.
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_add_to_infolist (struct t_infolist *infolist,
struct t_irc_server *server)
{
struct t_infolist_item *ptr_item;
if (!infolist || !server)
return 0;
ptr_item = weechat_infolist_new_item (infolist);
if (!ptr_item)
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "name", server->name))
return 0;
if (!weechat_infolist_new_var_pointer (ptr_item, "buffer", server->buffer))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "buffer_name",
(server->buffer) ?
weechat_buffer_get_string (server->buffer, "name") : ""))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "buffer_short_name",
(server->buffer) ?
weechat_buffer_get_string (server->buffer, "short_name") : ""))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "addresses",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_ADDRESSES)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "proxy",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PROXY)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ipv6",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_IPV6)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_cert",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_CERT)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_password",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_PASSWORD)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_priorities",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_PRIORITIES)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl_dhkey_size",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_fingerprint",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_FINGERPRINT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl_verify",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL_VERIFY)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "password",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PASSWORD)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "capabilities",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_CAPABILITIES)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "sasl_mechanism",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_MECHANISM)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "sasl_username",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_USERNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "sasl_password",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_PASSWORD)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "sasl_key",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_KEY)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "sasl_fail",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_FAIL)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autoconnect",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTOCONNECT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autoreconnect",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTORECONNECT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autoreconnect_delay",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AUTORECONNECT_DELAY)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "nicks",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_NICKS)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "nicks_alternate",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_NICKS_ALTERNATE)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "username",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "realname",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_REALNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "local_hostname",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_LOCAL_HOSTNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "usermode",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERMODE)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "command",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_COMMAND)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "command_delay",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_COMMAND_DELAY)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "autojoin",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_AUTOJOIN)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autorejoin",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTOREJOIN)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autorejoin_delay",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AUTOREJOIN_DELAY)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "connection_timeout",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_CONNECTION_TIMEOUT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "anti_flood_prio_high",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "anti_flood_prio_low",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "away_check",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AWAY_CHECK)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "away_check_max_nicks",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "msg_kick",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_MSG_KICK)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "msg_part",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_MSG_PART)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "msg_quit",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_MSG_QUIT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "temp_server", server->temp_server))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "index_current_address", server->index_current_address))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "current_address", server->current_address))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "current_ip", server->current_ip))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "current_port", server->current_port))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "current_retry", server->current_retry))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "sock", server->sock))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "is_connected", server->is_connected))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl_connected", server->ssl_connected))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "disconnected", server->disconnected))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "unterminated_message", server->unterminated_message))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "nick", server->nick))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "nick_modes", server->nick_modes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "host", server->host))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "checking_cap_ls", server->checking_cap_ls))
return 0;
if (!weechat_hashtable_add_to_infolist (server->cap_ls, ptr_item, "cap_ls"))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "checking_cap_list", server->checking_cap_list))
return 0;
if (!weechat_hashtable_add_to_infolist (server->cap_list, ptr_item, "cap_list"))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "isupport", server->isupport))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefix_modes", server->prefix_modes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefix_chars", server->prefix_chars))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "nick_max_length", server->nick_max_length))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "user_max_length", server->user_max_length))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "host_max_length", server->host_max_length))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "casemapping", server->casemapping))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "casemapping_string", irc_server_casemapping_string[server->casemapping]))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "chantypes", server->chantypes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "chanmodes", server->chanmodes))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "monitor", server->monitor))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "monitor_time", server->monitor_time))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "reconnect_delay", server->reconnect_delay))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "reconnect_start", server->reconnect_start))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "command_time", server->command_time))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "reconnect_join", server->reconnect_join))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "disable_autojoin", server->disable_autojoin))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "is_away", server->is_away))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "away_message", server->away_message))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "away_time", server->away_time))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "lag", server->lag))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "lag_displayed", server->lag_displayed))
return 0;
if (!weechat_infolist_new_var_buffer (ptr_item, "lag_check_time", &(server->lag_check_time), sizeof (struct timeval)))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "lag_next_check", server->lag_next_check))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "lag_last_refresh", server->lag_last_refresh))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "last_user_message", server->last_user_message))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "last_away_check", server->last_away_check))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "last_data_purge", server->last_data_purge))
return 0;
return 1;
}
/*
* Prints server infos in WeeChat log file (usually for crash dump).
*/
void
irc_server_print_log ()
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
int i;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
weechat_log_printf ("");
weechat_log_printf ("[server %s (addr:0x%lx)]", ptr_server->name, ptr_server);
/* addresses */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_ADDRESSES]))
weechat_log_printf (" addresses. . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_ADDRESSES));
else
weechat_log_printf (" addresses. . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_ADDRESSES]));
/* proxy */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_PROXY]))
weechat_log_printf (" proxy. . . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_PROXY));
else
weechat_log_printf (" proxy. . . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_PROXY]));
/* ipv6 */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_IPV6]))
weechat_log_printf (" ipv6 . . . . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_IPV6)) ?
"on" : "off");
else
weechat_log_printf (" ipv6 . . . . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_IPV6])) ?
"on" : "off");
/* ssl */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL]))
weechat_log_printf (" ssl. . . . . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_SSL)) ?
"on" : "off");
else
weechat_log_printf (" ssl. . . . . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_SSL])) ?
"on" : "off");
/* ssl_cert */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_CERT]))
weechat_log_printf (" ssl_cert . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SSL_CERT));
else
weechat_log_printf (" ssl_cert . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SSL_CERT]));
/* ssl_password */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_PASSWORD]))
weechat_log_printf (" ssl_password . . . . : null");
else
weechat_log_printf (" ssl_password . . . . : (hidden)");
/* ssl_priorities */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_PRIORITIES]))
weechat_log_printf (" ssl_priorities . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SSL_PRIORITIES));
else
weechat_log_printf (" ssl_priorities . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SSL_PRIORITIES]));
/* ssl_dhkey_size */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_DHKEY_SIZE]))
weechat_log_printf (" ssl_dhkey_size . . . : null ('%d')",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE));
else
weechat_log_printf (" ssl_dhkey_size . . . : '%d'",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_SSL_DHKEY_SIZE]));
/* ssl_fingerprint */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_FINGERPRINT]))
weechat_log_printf (" ssl_fingerprint. . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SSL_FINGERPRINT));
else
weechat_log_printf (" ssl_fingerprint. . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SSL_FINGERPRINT]));
/* ssl_verify */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_VERIFY]))
weechat_log_printf (" ssl_verify . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_SSL_VERIFY)) ?
"on" : "off");
else
weechat_log_printf (" ssl_verify . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_SSL_VERIFY])) ?
"on" : "off");
/* password */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_PASSWORD]))
weechat_log_printf (" password . . . . . . : null");
else
weechat_log_printf (" password . . . . . . : (hidden)");
/* client capabilities */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_CAPABILITIES]))
weechat_log_printf (" capabilities . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_CAPABILITIES));
else
weechat_log_printf (" capabilities . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_CAPABILITIES]));
/* sasl_mechanism */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_MECHANISM]))
weechat_log_printf (" sasl_mechanism . . . : null ('%s')",
irc_sasl_mechanism_string[IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_SASL_MECHANISM)]);
else
weechat_log_printf (" sasl_mechanism . . . : '%s'",
irc_sasl_mechanism_string[weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_SASL_MECHANISM])]);
/* sasl_username */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_USERNAME]))
weechat_log_printf (" sasl_username. . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SASL_USERNAME));
else
weechat_log_printf (" sasl_username. . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SASL_USERNAME]));
/* sasl_password */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_PASSWORD]))
weechat_log_printf (" sasl_password. . . . : null");
else
weechat_log_printf (" sasl_password. . . . : (hidden)");
/* sasl_key */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_KEY]))
weechat_log_printf (" sasl_key. . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SASL_KEY));
else
weechat_log_printf (" sasl_key. . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SASL_KEY]));
/* sasl_fail */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_FAIL]))
weechat_log_printf (" sasl_fail. . . . . . : null ('%s')",
irc_server_sasl_fail_string[IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_SASL_FAIL)]);
else
weechat_log_printf (" sasl_fail. . . . . . : '%s'",
irc_server_sasl_fail_string[weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_SASL_FAIL])]);
/* autoconnect */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOCONNECT]))
weechat_log_printf (" autoconnect. . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTOCONNECT)) ?
"on" : "off");
else
weechat_log_printf (" autoconnect. . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_AUTOCONNECT])) ?
"on" : "off");
/* autoreconnect */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT]))
weechat_log_printf (" autoreconnect. . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTORECONNECT)) ?
"on" : "off");
else
weechat_log_printf (" autoreconnect. . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT])) ?
"on" : "off");
/* autoreconnect_delay */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT_DELAY]))
weechat_log_printf (" autoreconnect_delay. : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AUTORECONNECT_DELAY));
else
weechat_log_printf (" autoreconnect_delay. : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT_DELAY]));
/* nicks */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_NICKS]))
weechat_log_printf (" nicks. . . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_NICKS));
else
weechat_log_printf (" nicks. . . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_NICKS]));
/* nicks_alternate */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_NICKS_ALTERNATE]))
weechat_log_printf (" nicks_alternate. . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_NICKS_ALTERNATE)) ?
"on" : "off");
else
weechat_log_printf (" nicks_alternate. . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_NICKS_ALTERNATE])) ?
"on" : "off");
/* username */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_USERNAME]))
weechat_log_printf (" username . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_USERNAME));
else
weechat_log_printf (" username . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_USERNAME]));
/* realname */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_REALNAME]))
weechat_log_printf (" realname . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_REALNAME));
else
weechat_log_printf (" realname . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_REALNAME]));
/* local_hostname */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_LOCAL_HOSTNAME]))
weechat_log_printf (" local_hostname . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_LOCAL_HOSTNAME));
else
weechat_log_printf (" local_hostname . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_LOCAL_HOSTNAME]));
/* usermode */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_USERMODE]))
weechat_log_printf (" usermode . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_USERMODE));
else
weechat_log_printf (" usermode . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_USERMODE]));
/* command */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_COMMAND]))
weechat_log_printf (" command. . . . . . . : null");
else
weechat_log_printf (" command. . . . . . . : (hidden)");
/* command_delay */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_COMMAND_DELAY]))
weechat_log_printf (" command_delay. . . . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_COMMAND_DELAY));
else
weechat_log_printf (" command_delay. . . . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_COMMAND_DELAY]));
/* autojoin */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOJOIN]))
weechat_log_printf (" autojoin . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_AUTOJOIN));
else
weechat_log_printf (" autojoin . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_AUTOJOIN]));
/* autorejoin */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN]))
weechat_log_printf (" autorejoin . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTOREJOIN)) ?
"on" : "off");
else
weechat_log_printf (" autorejoin . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN])) ?
"on" : "off");
/* autorejoin_delay */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN_DELAY]))
weechat_log_printf (" autorejoin_delay . . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AUTOREJOIN_DELAY));
else
weechat_log_printf (" autorejoin_delay . . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN_DELAY]));
/* connection_timeout */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_CONNECTION_TIMEOUT]))
weechat_log_printf (" connection_timeout . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_CONNECTION_TIMEOUT));
else
weechat_log_printf (" connection_timeout . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_CONNECTION_TIMEOUT]));
/* anti_flood_prio_high */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH]))
weechat_log_printf (" anti_flood_prio_high : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH));
else
weechat_log_printf (" anti_flood_prio_high : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH]));
/* anti_flood_prio_low */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW]))
weechat_log_printf (" anti_flood_prio_low. : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW));
else
weechat_log_printf (" anti_flood_prio_low. : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW]));
/* away_check */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK]))
weechat_log_printf (" away_check . . . . . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AWAY_CHECK));
else
weechat_log_printf (" away_check . . . . . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK]));
/* away_check_max_nicks */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS]))
weechat_log_printf (" away_check_max_nicks : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS));
else
weechat_log_printf (" away_check_max_nicks : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS]));
/* msg_kick */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_MSG_KICK]))
weechat_log_printf (" msg_kick . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_MSG_KICK));
else
weechat_log_printf (" msg_kick . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_MSG_KICK]));
/* msg_part */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_MSG_PART]))
weechat_log_printf (" msg_part . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_MSG_PART));
else
weechat_log_printf (" msg_part . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_MSG_PART]));
/* msg_quit */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_MSG_QUIT]))
weechat_log_printf (" msg_quit . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_MSG_QUIT));
else
weechat_log_printf (" msg_quit . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_MSG_QUIT]));
/* other server variables */
weechat_log_printf (" temp_server. . . . . : %d", ptr_server->temp_server);
weechat_log_printf (" reloading_from_config: %d", ptr_server->reloaded_from_config);
weechat_log_printf (" reloaded_from_config : %d", ptr_server->reloaded_from_config);
weechat_log_printf (" addresses_eval . . . : '%s'", ptr_server->addresses_eval);
weechat_log_printf (" addresses_count. . . : %d", ptr_server->addresses_count);
weechat_log_printf (" addresses_array. . . : 0x%lx", ptr_server->addresses_array);
weechat_log_printf (" ports_array. . . . . : 0x%lx", ptr_server->ports_array);
weechat_log_printf (" retry_array. . . . . : 0x%lx", ptr_server->retry_array);
weechat_log_printf (" index_current_address: %d", ptr_server->index_current_address);
weechat_log_printf (" current_address. . . : '%s'", ptr_server->current_address);
weechat_log_printf (" current_ip . . . . . : '%s'", ptr_server->current_ip);
weechat_log_printf (" current_port . . . . : %d", ptr_server->current_port);
weechat_log_printf (" current_retry. . . . : %d", ptr_server->current_retry);
weechat_log_printf (" sock . . . . . . . . : %d", ptr_server->sock);
weechat_log_printf (" hook_connect . . . . : 0x%lx", ptr_server->hook_connect);
weechat_log_printf (" hook_fd. . . . . . . : 0x%lx", ptr_server->hook_fd);
weechat_log_printf (" hook_timer_connection: 0x%lx", ptr_server->hook_timer_connection);
weechat_log_printf (" hook_timer_sasl. . . : 0x%lx", ptr_server->hook_timer_sasl);
weechat_log_printf (" is_connected . . . . : %d", ptr_server->is_connected);
weechat_log_printf (" ssl_connected. . . . : %d", ptr_server->ssl_connected);
weechat_log_printf (" disconnected . . . . : %d", ptr_server->disconnected);
#ifdef HAVE_GNUTLS
weechat_log_printf (" gnutls_sess. . . . . : 0x%lx", ptr_server->gnutls_sess);
#endif /* HAVE_GNUTLS */
weechat_log_printf (" unterminated_message : '%s'", ptr_server->unterminated_message);
weechat_log_printf (" nicks_count. . . . . : %d", ptr_server->nicks_count);
weechat_log_printf (" nicks_array. . . . . : 0x%lx", ptr_server->nicks_array);
weechat_log_printf (" nick_first_tried . . : %d", ptr_server->nick_first_tried);
weechat_log_printf (" nick_alternate_number: %d", ptr_server->nick_alternate_number);
weechat_log_printf (" nick . . . . . . . . : '%s'", ptr_server->nick);
weechat_log_printf (" nick_modes . . . . . : '%s'", ptr_server->nick_modes);
weechat_log_printf (" host . . . . . . . . : '%s'", ptr_server->host);
weechat_log_printf (" checking_cap_ls. . . : %d", ptr_server->checking_cap_ls);
weechat_log_printf (" cap_ls . . . . . . . : 0x%lx (hashtable: '%s')",
ptr_server->cap_ls,
weechat_hashtable_get_string (ptr_server->cap_ls, "keys_values"));
weechat_log_printf (" checking_cap_list. . : %d", ptr_server->checking_cap_list);
weechat_log_printf (" cap_list . . . . . . : 0x%lx (hashtable: '%s')",
ptr_server->cap_list,
weechat_hashtable_get_string (ptr_server->cap_list, "keys_values"));
weechat_log_printf (" isupport . . . . . . : '%s'", ptr_server->isupport);
weechat_log_printf (" prefix_modes . . . . : '%s'", ptr_server->prefix_modes);
weechat_log_printf (" prefix_chars . . . . : '%s'", ptr_server->prefix_chars);
weechat_log_printf (" nick_max_length. . . : %d", ptr_server->nick_max_length);
weechat_log_printf (" user_max_length. . . : %d", ptr_server->user_max_length);
weechat_log_printf (" host_max_length. . . : %d", ptr_server->host_max_length);
weechat_log_printf (" casemapping. . . . . : %d (%s)",
ptr_server->casemapping,
irc_server_casemapping_string[ptr_server->casemapping]);
weechat_log_printf (" chantypes. . . . . . : '%s'", ptr_server->chantypes);
weechat_log_printf (" chanmodes. . . . . . : '%s'", ptr_server->chanmodes);
weechat_log_printf (" monitor. . . . . . . : %d", ptr_server->monitor);
weechat_log_printf (" monitor_time . . . . : %lld", (long long)ptr_server->monitor_time);
weechat_log_printf (" reconnect_delay. . . : %d", ptr_server->reconnect_delay);
weechat_log_printf (" reconnect_start. . . : %lld", (long long)ptr_server->reconnect_start);
weechat_log_printf (" command_time . . . . : %lld", (long long)ptr_server->command_time);
weechat_log_printf (" reconnect_join . . . : %d", ptr_server->reconnect_join);
weechat_log_printf (" disable_autojoin . . : %d", ptr_server->disable_autojoin);
weechat_log_printf (" is_away. . . . . . . : %d", ptr_server->is_away);
weechat_log_printf (" away_message . . . . : '%s'", ptr_server->away_message);
weechat_log_printf (" away_time. . . . . . : %lld", (long long)ptr_server->away_time);
weechat_log_printf (" lag. . . . . . . . . : %d", ptr_server->lag);
weechat_log_printf (" lag_displayed. . . . : %d", ptr_server->lag_displayed);
weechat_log_printf (" lag_check_time . . . : tv_sec:%d, tv_usec:%d",
ptr_server->lag_check_time.tv_sec,
ptr_server->lag_check_time.tv_usec);
weechat_log_printf (" lag_next_check . . . : %lld", (long long)ptr_server->lag_next_check);
weechat_log_printf (" lag_last_refresh . . : %lld", (long long)ptr_server->lag_last_refresh);
weechat_log_printf (" cmd_list_regexp. . . : 0x%lx", ptr_server->cmd_list_regexp);
weechat_log_printf (" last_user_message. . : %lld", (long long)ptr_server->last_user_message);
weechat_log_printf (" last_away_check. . . : %lld", (long long)ptr_server->last_away_check);
weechat_log_printf (" last_data_purge. . . : %lld", (long long)ptr_server->last_data_purge);
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
weechat_log_printf (" outqueue[%02d] . . . . : 0x%lx", i, ptr_server->outqueue[i]);
weechat_log_printf (" last_outqueue[%02d]. . : 0x%lx", i, ptr_server->last_outqueue[i]);
}
weechat_log_printf (" redirects. . . . . . : 0x%lx", ptr_server->redirects);
weechat_log_printf (" last_redirect. . . . : 0x%lx", ptr_server->last_redirect);
weechat_log_printf (" notify_list. . . . . : 0x%lx", ptr_server->notify_list);
weechat_log_printf (" last_notify. . . . . : 0x%lx", ptr_server->last_notify);
weechat_log_printf (" notify_count . . . . : %d", ptr_server->notify_count);
weechat_log_printf (" join_manual. . . . . : 0x%lx (hashtable: '%s')",
ptr_server->join_manual,
weechat_hashtable_get_string (ptr_server->join_manual, "keys_values"));
weechat_log_printf (" join_channel_key . . : 0x%lx (hashtable: '%s')",
ptr_server->join_channel_key,
weechat_hashtable_get_string (ptr_server->join_channel_key, "keys_values"));
weechat_log_printf (" join_noswitch. . . . : 0x%lx (hashtable: '%s')",
ptr_server->join_noswitch,
weechat_hashtable_get_string (ptr_server->join_noswitch, "keys_values"));
weechat_log_printf (" buffer . . . . . . . : 0x%lx", ptr_server->buffer);
weechat_log_printf (" buffer_as_string . . : 0x%lx", ptr_server->buffer_as_string);
weechat_log_printf (" channels . . . . . . : 0x%lx", ptr_server->channels);
weechat_log_printf (" last_channel . . . . : 0x%lx", ptr_server->last_channel);
weechat_log_printf (" prev_server. . . . . : 0x%lx", ptr_server->prev_server);
weechat_log_printf (" next_server. . . . . : 0x%lx", ptr_server->next_server);
irc_redirect_print_log (ptr_server);
irc_notify_print_log (ptr_server);
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
irc_channel_print_log (ptr_channel);
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4697_3 |
crossvul-cpp_data_good_999_1 | /*
* Marvell Wireless LAN device driver: AP specific command handling
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
#include "11ac.h"
#include "11n.h"
/* This function parses security related parameters from cfg80211_ap_settings
* and sets into FW understandable bss_config structure.
*/
int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params) {
int i;
struct mwifiex_wep_key wep_key;
if (!params->privacy) {
bss_config->protocol = PROTOCOL_NO_SECURITY;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
priv->sec_info.wep_enabled = 0;
priv->sec_info.wpa_enabled = 0;
priv->sec_info.wpa2_enabled = 0;
return 0;
}
switch (params->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
bss_config->auth_mode = WLAN_AUTH_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
bss_config->auth_mode = WLAN_AUTH_LEAP;
break;
default:
bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
break;
}
bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
for (i = 0; i < params->crypto.n_akm_suites; i++) {
switch (params->crypto.akm_suites[i]) {
case WLAN_AKM_SUITE_8021X:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
break;
case WLAN_AKM_SUITE_PSK:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
break;
default:
break;
}
}
for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
switch (params->crypto.ciphers_pairwise[i]) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
break;
case WLAN_CIPHER_SUITE_TKIP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_TKIP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_AES_CCMP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
default:
break;
}
}
switch (params->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (priv->sec_info.wep_enabled) {
bss_config->protocol = PROTOCOL_STATIC_WEP;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
for (i = 0; i < NUM_WEP_KEYS; i++) {
wep_key = priv->wep_key[i];
bss_config->wep_cfg[i].key_index = i;
if (priv->wep_key_curr_index == i)
bss_config->wep_cfg[i].is_default = 1;
else
bss_config->wep_cfg[i].is_default = 0;
bss_config->wep_cfg[i].length =
wep_key.key_length;
memcpy(&bss_config->wep_cfg[i].key,
&wep_key.key_material,
wep_key.key_length);
}
}
break;
case WLAN_CIPHER_SUITE_TKIP:
bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
break;
default:
break;
}
return 0;
}
/* This function updates 11n related parameters from IE and sets them into
* bss_config structure.
*/
void
mwifiex_set_ht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *ht_ie;
if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
return;
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap));
bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_vht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vht_ie;
vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (vht_ie) {
memcpy(&bss_cfg->vht_cap, vht_ie + 2,
sizeof(struct ieee80211_vht_cap));
priv->ap_11ac_enabled = 1;
} else {
priv->ap_11ac_enabled = 0;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_tpc_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *tpc_ie;
tpc_ie = cfg80211_find_ie(WLAN_EID_TPC_REQUEST, params->beacon.tail,
params->beacon.tail_len);
if (tpc_ie)
bss_cfg->power_constraint = *(tpc_ie + 2);
else
bss_cfg->power_constraint = 0;
}
/* Enable VHT only when cfg80211_ap_settings has VHT IE.
* Otherwise disable VHT.
*/
void mwifiex_set_vht_width(struct mwifiex_private *priv,
enum nl80211_chan_width width,
bool ap_11ac_enable)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_11ac_vht_cfg vht_cfg;
vht_cfg.band_config = VHT_CFG_5GHZ;
vht_cfg.cap_info = adapter->hw_dot_11ac_dev_cap;
if (!ap_11ac_enable) {
vht_cfg.mcs_tx_set = DISABLE_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DISABLE_VHT_MCS_SET;
} else {
vht_cfg.mcs_tx_set = DEFAULT_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DEFAULT_VHT_MCS_SET;
}
vht_cfg.misc_config = VHT_CAP_UAP_ONLY;
if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
vht_cfg.misc_config |= VHT_BW_80_160_80P80;
mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
return;
}
/* This function finds supported rates IE from beacon parameter and sets
* these rates into bss_config structure.
*/
void
mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
struct ieee_types_header *rate_ie;
int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u8 *var_pos = params->beacon.head + var_offset;
int len = params->beacon.head_len - var_offset;
u8 rate_len = 0;
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
if (rate_ie) {
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
}
return;
}
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
* be get updated to valid ones at later stage.
*/
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
{
config->bcast_ssid_ctl = 0x7F;
config->radio_ctl = 0x7F;
config->dtim_period = 0x7F;
config->beacon_period = 0x7FFF;
config->auth_mode = 0x7F;
config->rts_threshold = 0x7FFF;
config->frag_threshold = 0x7FFF;
config->retry_limit = 0x7F;
config->qos_info = 0xFF;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WPA/WPA2 security.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_pwk_cipher *pwk_cipher;
struct host_cmd_tlv_gwk_cipher *gwk_cipher;
struct host_cmd_tlv_passphrase *passphrase;
struct host_cmd_tlv_akmp *tlv_akmp;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
u16 cmd_size = *param_size;
u8 *tlv = *tlv_buf;
tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
sizeof(struct mwifiex_ie_types_header));
tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
cmd_size += sizeof(struct host_cmd_tlv_akmp);
tlv += sizeof(struct host_cmd_tlv_akmp);
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
gwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
}
if (bss_cfg->wpa_cfg.length) {
passphrase = (struct host_cmd_tlv_passphrase *)tlv;
passphrase->header.type =
cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
bss_cfg->wpa_cfg.length);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function parses WMM related parameters from cfg80211_ap_settings
* structure and updates bss_config structure.
*/
void
mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vendor_ie;
const u8 *wmm_ie;
u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
params->beacon.tail,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
return;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
} else {
memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui));
bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE;
bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION;
priv->wmm_enabled = 0;
}
bss_cfg->qos_info = 0x00;
return;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WEP encryption.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_wep_key *wep_key;
u16 cmd_size = *param_size;
int i;
u8 *tlv = *tlv_buf;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
for (i = 0; i < NUM_WEP_KEYS; i++) {
if (bss_cfg->wep_cfg[i].length &&
(bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
wep_key = (struct host_cmd_tlv_wep_key *)tlv;
wep_key->header.type =
cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
wep_key->header.len =
cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
bss_cfg->wep_cfg[i].length);
cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
}
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function enable 11D if userspace set the country IE.
*/
void mwifiex_config_uap_11d(struct mwifiex_private *priv,
struct cfg80211_beacon_data *beacon_data)
{
enum state_11d_t state_11d;
const u8 *country_ie;
country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_data->tail,
beacon_data->tail_len);
if (country_ie) {
/* Send cmd to FW to enable 11D function */
state_11d = ENABLE_11D;
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
HostCmd_ACT_GEN_SET, DOT11D_I,
&state_11d, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"11D: failed to enable 11D\n");
}
}
}
/* This function parses BSS related parameters from structure
* and prepares TLVs. These TLVs are appended to command buffer.
*/
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
struct host_cmd_tlv_bcast_ssid *bcast_ssid;
struct host_cmd_tlv_channel_band *chan_band;
struct host_cmd_tlv_frag_threshold *frag_threshold;
struct host_cmd_tlv_rts_threshold *rts_threshold;
struct host_cmd_tlv_retry_limit *retry_limit;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
struct host_cmd_tlv_rates *tlv_rates;
struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
struct host_cmd_tlv_power_constraint *pwr_ct;
struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_ie_types_wmmcap *wmm_cap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
int i;
u16 cmd_size = *param_size;
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
bcast_ssid->header.len =
cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
}
if (bss_cfg->rates[0]) {
tlv_rates = (struct host_cmd_tlv_rates *)tlv;
tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
i++)
tlv_rates->rates[i] = bss_cfg->rates[i];
tlv_rates->header.len = cpu_to_le16(i);
cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
if (bss_cfg->channel &&
(((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG &&
bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
chan_band->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
sizeof(struct mwifiex_ie_types_header));
chan_band->band_config = bss_cfg->band_cfg;
chan_band->channel = bss_cfg->channel;
cmd_size += sizeof(struct host_cmd_tlv_channel_band);
tlv += sizeof(struct host_cmd_tlv_channel_band);
}
if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
beacon_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
beacon_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
sizeof(struct mwifiex_ie_types_header));
beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
tlv += sizeof(struct host_cmd_tlv_beacon_period);
}
if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
dtim_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
dtim_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
sizeof(struct mwifiex_ie_types_header));
dtim_period->period = bss_cfg->dtim_period;
cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
tlv += sizeof(struct host_cmd_tlv_dtim_period);
}
if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
rts_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
rts_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
sizeof(struct mwifiex_ie_types_header));
rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
(bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
frag_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
frag_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
sizeof(struct mwifiex_ie_types_header));
frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
retry_limit->header.type =
cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
retry_limit->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
sizeof(struct mwifiex_ie_types_header));
retry_limit->limit = (u8)bss_cfg->retry_limit;
cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
tlv += sizeof(struct host_cmd_tlv_retry_limit);
}
if ((bss_cfg->protocol & PROTOCOL_WPA) ||
(bss_cfg->protocol & PROTOCOL_WPA2) ||
(bss_cfg->protocol & PROTOCOL_EAP))
mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
else
mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
auth_type->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
sizeof(struct mwifiex_ie_types_header));
auth_type->auth_type = (u8)bss_cfg->auth_mode;
cmd_size += sizeof(struct host_cmd_tlv_auth_type);
tlv += sizeof(struct host_cmd_tlv_auth_type);
}
if (bss_cfg->protocol) {
encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
encrypt_protocol->header.type =
cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
encrypt_protocol->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
- sizeof(struct mwifiex_ie_types_header));
encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
}
if (bss_cfg->ht_cap.cap_info) {
htcap = (struct mwifiex_ie_types_htcap *)tlv;
htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
htcap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
htcap->ht_cap.ampdu_params_info =
bss_cfg->ht_cap.ampdu_params_info;
memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
sizeof(struct ieee80211_mcs_info));
htcap->ht_cap.extended_ht_cap_info =
bss_cfg->ht_cap.extended_ht_cap_info;
htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
htcap->ht_cap.antenna_selection_info =
bss_cfg->ht_cap.antenna_selection_info;
cmd_size += sizeof(struct mwifiex_ie_types_htcap);
tlv += sizeof(struct mwifiex_ie_types_htcap);
}
if (bss_cfg->wmm_info.qos_info != 0xFF) {
wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv;
wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC);
wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info));
memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info,
sizeof(wmm_cap->wmm_info));
cmd_size += sizeof(struct mwifiex_ie_types_wmmcap);
tlv += sizeof(struct mwifiex_ie_types_wmmcap);
}
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
cmd_size += sizeof(*ao_timer);
tlv += sizeof(*ao_timer);
}
if (bss_cfg->power_constraint) {
pwr_ct = (void *)tlv;
pwr_ct->header.type = cpu_to_le16(TLV_TYPE_PWR_CONSTRAINT);
pwr_ct->header.len = cpu_to_le16(sizeof(u8));
pwr_ct->constraint = bss_cfg->power_constraint;
cmd_size += sizeof(*pwr_ct);
tlv += sizeof(*pwr_ct);
}
if (bss_cfg->ps_sta_ao_timer) {
ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ps_ao_timer->header.type =
cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ps_ao_timer->sta_ao_timer =
cpu_to_le32(bss_cfg->ps_sta_ao_timer);
cmd_size += sizeof(*ps_ao_timer);
tlv += sizeof(*ps_ao_timer);
}
*param_size = cmd_size;
return 0;
}
/* This function parses custom IEs from IE list and prepares command buffer */
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
struct mwifiex_ie_list *ap_ie = cmd_buf;
struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
if (!ap_ie || !ap_ie->len)
return -1;
*ie_size += le16_to_cpu(ap_ie->len) +
sizeof(struct mwifiex_ie_types_header);
tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
tlv_ie->len = ap_ie->len;
tlv += sizeof(struct mwifiex_ie_types_header);
memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
return 0;
}
/* Parse AP config structure and prepare TLV based command structure
* to be sent to FW for uAP configuration
*/
static int
mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
u32 type, void *cmd_buf)
{
u8 *tlv;
u16 cmd_size, param_size, ie_size;
struct host_cmd_ds_sys_config *sys_cfg;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
sys_cfg->action = cpu_to_le16(cmd_action);
tlv = sys_cfg->tlv;
switch (type) {
case UAP_BSS_PARAMS_I:
param_size = cmd_size;
if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, ¶m_size))
return -1;
cmd->size = cpu_to_le16(param_size);
break;
case UAP_CUSTOM_IE_I:
ie_size = cmd_size;
if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
return -1;
cmd->size = cpu_to_le16(ie_size);
break;
default:
return -1;
}
return 0;
}
/* This function prepares AP specific deauth command with mac supplied in
* function parameter.
*/
static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u8 *mac)
{
struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
memcpy(sta_deauth->mac, mac, ETH_ALEN);
sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
S_DS_GEN);
return 0;
}
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
* routines based upon the command number.
*/
int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
u16 cmd_action, u32 type,
void *data_buf, void *cmd_buf)
{
struct host_cmd_ds_command *cmd = cmd_buf;
switch (cmd_no) {
case HostCmd_CMD_UAP_SYS_CONFIG:
if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
cmd->command = cpu_to_le16(cmd_no);
cmd->size = cpu_to_le16(S_DS_GEN);
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
return -1;
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
data_buf))
return -1;
break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
return 0;
}
void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef)
{
u8 config_bands = 0, old_bands = priv->adapter->config_bands;
priv->bss_chandef = chandef;
bss_cfg->channel = ieee80211_frequency_to_channel(
chandef.chan->center_freq);
/* Set appropriate bands */
if (chandef.chan->band == NL80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
config_bands = BAND_B | BAND_G;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_AN;
if (chandef.width > NL80211_CHAN_WIDTH_40)
config_bands |= BAND_AAC;
}
switch (chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
break;
case NL80211_CHAN_WIDTH_40:
if (chandef.center_freq1 < chandef.chan->center_freq)
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW;
else
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE;
break;
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
bss_cfg->band_cfg |=
mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4;
break;
default:
mwifiex_dbg(priv->adapter,
WARN, "Unknown channel width: %d\n",
chandef.width);
break;
}
priv->adapter->config_bands = config_bands;
if (old_bands != config_bands) {
mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
mwifiex_dnld_txpwr_table(priv);
}
}
int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg)
{
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to set AP configuration\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to start the BSS\n");
return -1;
}
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, true))
return -1;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_999_1 |
crossvul-cpp_data_good_4697_3 | /*
* irc-server.c - I/O communication with IRC servers
*
* Copyright (C) 2003-2020 Sébastien Helleu <flashcode@flashtux.org>
* Copyright (C) 2005-2010 Emmanuel Bouthenot <kolter@openics.org>
* Copyright (C) 2012 Simon Arlott
*
* This file is part of WeeChat, the extensible chat client.
*
* WeeChat is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* WeeChat is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with WeeChat. If not, see <https://www.gnu.org/licenses/>.
*/
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#ifdef _WIN32
#include <winsock.h>
#else
#include <sys/socket.h>
#include <sys/time.h>
#endif /* _WIN32 */
#include <sys/types.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <arpa/nameser.h>
#include <resolv.h>
#ifdef HAVE_GNUTLS
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>
#endif /* HAVE_GNUTLS */
#include "../weechat-plugin.h"
#include "irc.h"
#include "irc-server.h"
#include "irc-bar-item.h"
#include "irc-buffer.h"
#include "irc-channel.h"
#include "irc-color.h"
#include "irc-command.h"
#include "irc-config.h"
#include "irc-input.h"
#include "irc-message.h"
#include "irc-nick.h"
#include "irc-notify.h"
#include "irc-protocol.h"
#include "irc-raw.h"
#include "irc-redirect.h"
#include "irc-sasl.h"
struct t_irc_server *irc_servers = NULL;
struct t_irc_server *last_irc_server = NULL;
struct t_irc_message *irc_recv_msgq = NULL;
struct t_irc_message *irc_msgq_last_msg = NULL;
char *irc_server_sasl_fail_string[IRC_SERVER_NUM_SASL_FAIL] =
{ "continue", "reconnect", "disconnect" };
char *irc_server_options[IRC_SERVER_NUM_OPTIONS][2] =
{ { "addresses", "" },
{ "proxy", "" },
{ "ipv6", "on" },
{ "ssl", "off" },
{ "ssl_cert", "" },
{ "ssl_password", "" },
{ "ssl_priorities", "NORMAL:-VERS-SSL3.0" },
{ "ssl_dhkey_size", "2048" },
{ "ssl_fingerprint", "" },
{ "ssl_verify", "on" },
{ "password", "" },
{ "capabilities", "" },
{ "sasl_mechanism", "plain" },
{ "sasl_username", "" },
{ "sasl_password", "" },
{ "sasl_key", "", },
{ "sasl_timeout", "15" },
{ "sasl_fail", "continue" },
{ "autoconnect", "off" },
{ "autoreconnect", "on" },
{ "autoreconnect_delay", "10" },
{ "nicks", "" },
{ "nicks_alternate", "on" },
{ "username", "" },
{ "realname", "" },
{ "local_hostname", "" },
{ "usermode", "" },
{ "command", "" },
{ "command_delay", "0" },
{ "autojoin", "" },
{ "autorejoin", "off" },
{ "autorejoin_delay", "30" },
{ "connection_timeout", "60" },
{ "anti_flood_prio_high", "2" },
{ "anti_flood_prio_low", "2" },
{ "away_check", "0" },
{ "away_check_max_nicks", "25" },
{ "msg_kick", "" },
{ "msg_part", "WeeChat ${info:version}" },
{ "msg_quit", "WeeChat ${info:version}" },
{ "notify", "" },
{ "split_msg_max_length", "512" },
{ "charset_message", "message" },
};
char *irc_server_casemapping_string[IRC_SERVER_NUM_CASEMAPPING] =
{ "rfc1459", "strict-rfc1459", "ascii" };
char *irc_server_prefix_modes_default = "ov";
char *irc_server_prefix_chars_default = "@+";
char *irc_server_chanmodes_default = "beI,k,l";
const char *irc_server_send_default_tags = NULL; /* default tags when */
/* sending a message */
#ifdef HAVE_GNUTLS
gnutls_digest_algorithm_t irc_fingerprint_digest_algos[IRC_FINGERPRINT_NUM_ALGOS] =
{ GNUTLS_DIG_SHA1, GNUTLS_DIG_SHA256, GNUTLS_DIG_SHA512 };
char *irc_fingerprint_digest_algos_name[IRC_FINGERPRINT_NUM_ALGOS] =
{ "SHA-1", "SHA-256", "SHA-512" };
int irc_fingerprint_digest_algos_size[IRC_FINGERPRINT_NUM_ALGOS] =
{ 160, 256, 512 };
#endif /* HAVE_GNUTLS */
void irc_server_reconnect (struct t_irc_server *server);
void irc_server_free_data (struct t_irc_server *server);
void irc_server_autojoin_create_buffers (struct t_irc_server *server);
/*
* Checks if a server pointer is valid.
*
* Returns:
* 1: server exists
* 0: server does not exist
*/
int
irc_server_valid (struct t_irc_server *server)
{
struct t_irc_server *ptr_server;
if (!server)
return 0;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (ptr_server == server)
return 1;
}
/* server not found */
return 0;
}
/*
* Searches for a server by name.
*
* Returns pointer to server found, NULL if not found.
*/
struct t_irc_server *
irc_server_search (const char *server_name)
{
struct t_irc_server *ptr_server;
if (!server_name)
return NULL;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (strcmp (ptr_server->name, server_name) == 0)
return ptr_server;
}
/* server not found */
return NULL;
}
/*
* Searches for a server by name (case insensitive).
*
* Returns pointer to server found, NULL if not found.
*/
struct t_irc_server *
irc_server_casesearch (const char *server_name)
{
struct t_irc_server *ptr_server;
if (!server_name)
return NULL;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (weechat_strcasecmp (ptr_server->name, server_name) == 0)
return ptr_server;
}
/* server not found */
return NULL;
}
/*
* Searches for a server option name.
*
* Returns index of option in array "irc_server_option_string", -1 if not found.
*/
int
irc_server_search_option (const char *option_name)
{
int i;
if (!option_name)
return -1;
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
if (weechat_strcasecmp (irc_server_options[i][0], option_name) == 0)
return i;
}
/* server option not found */
return -1;
}
/*
* Searches for a casemapping.
*
* Returns index of casemapping in array "irc_server_casemapping_string", -1 if
* not found.
*/
int
irc_server_search_casemapping (const char *casemapping)
{
int i;
for (i = 0; i < IRC_SERVER_NUM_CASEMAPPING; i++)
{
if (weechat_strcasecmp (irc_server_casemapping_string[i], casemapping) == 0)
return i;
}
/* casemapping not found */
return -1;
}
/*
* Compares two strings on server (case insensitive, depends on casemapping).
*
* Returns:
* < 0: string1 < string2
* 0: string1 == string2
* > 0: string1 > string2
*/
int
irc_server_strcasecmp (struct t_irc_server *server,
const char *string1, const char *string2)
{
int casemapping, rc;
casemapping = (server) ? server->casemapping : IRC_SERVER_CASEMAPPING_RFC1459;
switch (casemapping)
{
case IRC_SERVER_CASEMAPPING_RFC1459:
rc = weechat_strcasecmp_range (string1, string2, 30);
break;
case IRC_SERVER_CASEMAPPING_STRICT_RFC1459:
rc = weechat_strcasecmp_range (string1, string2, 29);
break;
case IRC_SERVER_CASEMAPPING_ASCII:
rc = weechat_strcasecmp (string1, string2);
break;
default:
rc = weechat_strcasecmp_range (string1, string2, 30);
break;
}
return rc;
}
/*
* Compares two strings on server (case insensitive, depends on casemapping) for
* max chars.
*
* Returns:
* < 0: string1 < string2
* 0: string1 == string2
* > 0: string1 > string2
*/
int
irc_server_strncasecmp (struct t_irc_server *server,
const char *string1, const char *string2, int max)
{
int casemapping, rc;
casemapping = (server) ? server->casemapping : IRC_SERVER_CASEMAPPING_RFC1459;
switch (casemapping)
{
case IRC_SERVER_CASEMAPPING_RFC1459:
rc = weechat_strncasecmp_range (string1, string2, max, 30);
break;
case IRC_SERVER_CASEMAPPING_STRICT_RFC1459:
rc = weechat_strncasecmp_range (string1, string2, max, 29);
break;
case IRC_SERVER_CASEMAPPING_ASCII:
rc = weechat_strncasecmp (string1, string2, max);
break;
default:
rc = weechat_strncasecmp_range (string1, string2, max, 30);
break;
}
return rc;
}
/*
* Evaluates a string using the server as context:
* ${irc_server.xxx} and ${server} are replaced by a server option and the
* server name.
*
* Returns the evaluated string.
*
* Note: result must be freed after use.
*/
char *
irc_server_eval_expression (struct t_irc_server *server, const char *string)
{
struct t_hashtable *pointers, *extra_vars;
char *value;
pointers = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_POINTER,
NULL, NULL);
extra_vars = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL, NULL);
if (server)
{
if (pointers)
weechat_hashtable_set (pointers, "irc_server", server);
if (extra_vars)
weechat_hashtable_set (extra_vars, "server", server->name);
}
value = weechat_string_eval_expression (string,
pointers, extra_vars, NULL);
if (pointers)
weechat_hashtable_free (pointers);
if (extra_vars)
weechat_hashtable_free (extra_vars);
return value;
}
/*
* Evaluates and returns the fingerprint.
*
* Returns the evaluated fingerprint, NULL if the fingerprint option is
* invalid.
*
* Note: result must be freed after use.
*/
char *
irc_server_eval_fingerprint (struct t_irc_server *server)
{
#ifdef HAVE_GNUTLS
const char *ptr_fingerprint;
char *fingerprint_eval, **fingerprints, *str_sizes;
int i, j, rc, algo, length;
ptr_fingerprint = IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_FINGERPRINT);
/* empty fingerprint is just ignored (considered OK) */
if (!ptr_fingerprint || !ptr_fingerprint[0])
return strdup ("");
/* evaluate fingerprint */
fingerprint_eval = irc_server_eval_expression (server, ptr_fingerprint);
if (!fingerprint_eval || !fingerprint_eval[0])
{
weechat_printf (
server->buffer,
_("%s%s: the evaluated fingerprint for server \"%s\" must not be "
"empty"),
weechat_prefix ("error"),
IRC_PLUGIN_NAME,
server->name);
if (fingerprint_eval)
free (fingerprint_eval);
return NULL;
}
/* split fingerprint */
fingerprints = weechat_string_split (fingerprint_eval, ",", NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0, NULL);
if (!fingerprints)
return fingerprint_eval;
rc = 0;
for (i = 0; fingerprints[i]; i++)
{
length = strlen (fingerprints[i]);
algo = irc_server_fingerprint_search_algo_with_size (length * 4);
if (algo < 0)
{
rc = -1;
break;
}
for (j = 0; j < length; j++)
{
if (!isxdigit ((unsigned char)fingerprints[i][j]))
{
rc = -2;
break;
}
}
if (rc < 0)
break;
}
weechat_string_free_split (fingerprints);
switch (rc)
{
case -1: /* invalid size */
str_sizes = irc_server_fingerprint_str_sizes ();
weechat_printf (
server->buffer,
_("%s%s: invalid fingerprint size for server \"%s\", the "
"number of hexadecimal digits must be "
"one of: %s"),
weechat_prefix ("error"),
IRC_PLUGIN_NAME,
server->name,
(str_sizes) ? str_sizes : "?");
if (str_sizes)
free (str_sizes);
free (fingerprint_eval);
return NULL;
case -2: /* invalid content */
weechat_printf (
server->buffer,
_("%s%s: invalid fingerprint for server \"%s\", it must "
"contain only hexadecimal digits (0-9, "
"a-f)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, server->name);
free (fingerprint_eval);
return NULL;
}
return fingerprint_eval;
#else
/* make C compiler happy */
(void) server;
return strdup ("");
#endif /* HAVE_GNUTLS */
}
/*
* Checks if SASL is enabled on server.
*
* Returns:
* 1: SASL is enabled
* 0: SASL is disabled
*/
int
irc_server_sasl_enabled (struct t_irc_server *server)
{
int sasl_mechanism, rc;
char *sasl_username, *sasl_password;
const char *sasl_key;
sasl_mechanism = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_SASL_MECHANISM);
sasl_username = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_USERNAME));
sasl_password = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_PASSWORD));
sasl_key = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_KEY);
/*
* SASL is enabled if one of these conditions is true:
* - mechanism is "external"
* - mechanism is "ecdsa-nist256p-challenge" with username/key set
* - another mechanism with username/password set
*/
rc = ((sasl_mechanism == IRC_SASL_MECHANISM_EXTERNAL)
|| ((sasl_mechanism == IRC_SASL_MECHANISM_ECDSA_NIST256P_CHALLENGE)
&& sasl_username && sasl_username[0]
&& sasl_key && sasl_key[0])
|| (sasl_username && sasl_username[0]
&& sasl_password && sasl_password[0])) ? 1 : 0;
if (sasl_username)
free (sasl_username);
if (sasl_password)
free (sasl_password);
return rc;
}
/*
* Gets name of server without port (ends before first '/' if found).
*
* Note: result must be freed after use.
*/
char *
irc_server_get_name_without_port (const char *name)
{
char *pos;
if (!name)
return NULL;
pos = strchr (name, '/');
if (pos && (pos != name))
return weechat_strndup (name, pos - name);
return strdup (name);
}
/*
* Sets addresses for server.
*
* Returns:
* 1: addresses have been set (changed)
* 0: nothing set (addresses unchanged)
*/
int
irc_server_set_addresses (struct t_irc_server *server, const char *addresses)
{
int i;
char *pos, *error, *addresses_eval;
long number;
addresses_eval = NULL;
if (addresses && addresses[0])
{
addresses_eval = irc_server_eval_expression (server, addresses);
if (server->addresses_eval
&& (strcmp (server->addresses_eval, addresses_eval) == 0))
{
free (addresses_eval);
return 0;
}
}
/* free data */
if (server->addresses_eval)
{
free (server->addresses_eval);
server->addresses_eval = NULL;
}
server->addresses_count = 0;
if (server->addresses_array)
{
weechat_string_free_split (server->addresses_array);
server->addresses_array = NULL;
}
if (server->ports_array)
{
free (server->ports_array);
server->ports_array = NULL;
}
if (server->retry_array)
{
free (server->retry_array);
server->retry_array = NULL;
}
/* set new addresses/ports */
server->addresses_eval = addresses_eval;
if (!addresses_eval)
return 1;
server->addresses_array = weechat_string_split (
addresses_eval,
",",
" ",
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0,
&server->addresses_count);
server->ports_array = malloc (
server->addresses_count * sizeof (server->ports_array[0]));
server->retry_array = malloc (
server->addresses_count * sizeof (server->retry_array[0]));
for (i = 0; i < server->addresses_count; i++)
{
pos = strchr (server->addresses_array[i], '/');
if (pos)
{
pos[0] = 0;
pos++;
error = NULL;
number = strtol (pos, &error, 10);
server->ports_array[i] = (error && !error[0]) ?
number : IRC_SERVER_DEFAULT_PORT;
}
else
{
server->ports_array[i] = IRC_SERVER_DEFAULT_PORT;
}
server->retry_array[i] = 0;
}
return 1;
}
/*
* Sets index of current address for server.
*/
void
irc_server_set_index_current_address (struct t_irc_server *server, int index)
{
int addresses_changed;
addresses_changed = irc_server_set_addresses (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_ADDRESSES));
if (addresses_changed)
{
/* if the addresses have changed, reset the index to 0 */
index = 0;
}
if (server->current_address)
{
free (server->current_address);
server->current_address = NULL;
/* copy current retry value before loading next server */
if (!addresses_changed
&& server->index_current_address < server->addresses_count)
{
server->retry_array[server->index_current_address] = server->current_retry;
}
}
server->current_port = 0;
server->current_retry = 0;
if (server->addresses_count > 0)
{
index %= server->addresses_count;
server->index_current_address = index;
server->current_address = strdup (server->addresses_array[index]);
server->current_port = server->ports_array[index];
server->current_retry = server->retry_array[index];
}
}
/*
* Sets nicks for server.
*/
void
irc_server_set_nicks (struct t_irc_server *server, const char *nicks)
{
char *nicks2;
/* free data */
server->nicks_count = 0;
if (server->nicks_array)
{
weechat_string_free_split (server->nicks_array);
server->nicks_array = NULL;
}
/* evaluate value */
nicks2 = irc_server_eval_expression (server, nicks);
/* set new nicks */
server->nicks_array = weechat_string_split (
(nicks2) ? nicks2 : IRC_SERVER_DEFAULT_NICKS,
",",
NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0,
&server->nicks_count);
if (nicks2)
free (nicks2);
}
/*
* Sets nickname for server.
*/
void
irc_server_set_nick (struct t_irc_server *server, const char *nick)
{
struct t_irc_channel *ptr_channel;
/* if nick is the same, just return */
if ((!server->nick && !nick)
|| (server->nick && nick && strcmp (server->nick, nick) == 0))
{
return;
}
/* update the nick in server */
if (server->nick)
free (server->nick);
server->nick = (nick) ? strdup (nick) : NULL;
/* set local variable "nick" for server and all channels/pv */
weechat_buffer_set (server->buffer, "localvar_set_nick", nick);
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
weechat_buffer_set (ptr_channel->buffer, "localvar_set_nick", nick);
}
weechat_bar_item_update ("input_prompt");
weechat_bar_item_update ("irc_nick");
weechat_bar_item_update ("irc_nick_host");
}
/*
* Sets host for server.
*/
void
irc_server_set_host (struct t_irc_server *server, const char *host)
{
struct t_irc_channel *ptr_channel;
/* if host is the same, just return */
if ((!server->host && !host)
|| (server->host && host && strcmp (server->host, host) == 0))
{
return;
}
/* update the nick host in server */
if (server->host)
free (server->host);
server->host = (host) ? strdup (host) : NULL;
/* set local variable "host" for server and all channels/pv */
weechat_buffer_set (server->buffer, "localvar_set_host", host);
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
weechat_buffer_set (ptr_channel->buffer,
"localvar_set_host", host);
}
weechat_bar_item_update ("irc_host");
weechat_bar_item_update ("irc_nick_host");
}
/*
* Gets index of nick in array "nicks_array".
*
* Returns index of nick in array, -1 if nick is not set or not found in
* "nicks_array".
*/
int
irc_server_get_nick_index (struct t_irc_server *server)
{
int i;
if (!server->nick)
return -1;
for (i = 0; i < server->nicks_count; i++)
{
if (strcmp (server->nick, server->nicks_array[i]) == 0)
{
return i;
}
}
/* nick not found */
return -1;
}
/*
* Gets an alternate nick when the nick is already used on server.
*
* First tries all declared nicks, then builds nicks by adding "_", until
* length of 9.
*
* If all nicks are still used, builds 99 alternate nicks by using number at the
* end.
*
* Example: nicks = "abcde,fghi,jkl"
* => nicks tried: abcde
* fghi
* jkl
* abcde_
* abcde__
* abcde___
* abcde____
* abcde___1
* abcde___2
* ...
* abcde__99
*
* Returns NULL if no more alternate nick is available.
*/
const char *
irc_server_get_alternate_nick (struct t_irc_server *server)
{
static char nick[64];
char str_number[64];
int nick_index, length_nick, length_number;
nick[0] = '\0';
/* we are still trying nicks from option "nicks" */
if (server->nick_alternate_number < 0)
{
nick_index = irc_server_get_nick_index (server);
if (nick_index < 0)
nick_index = 0;
else
{
nick_index = (nick_index + 1) % server->nicks_count;
/* stop loop if first nick tried was not in the list of nicks */
if ((nick_index == 0) && (server->nick_first_tried < 0))
server->nick_first_tried = 0;
}
if (nick_index != server->nick_first_tried)
{
snprintf (nick, sizeof (nick),
"%s", server->nicks_array[nick_index]);
return nick;
}
/* now we have tried all nicks in list */
/* if alternate nicks are disabled, just return NULL */
if (!IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_NICKS_ALTERNATE))
return NULL;
/* use main nick and we will add "_" and then number if needed */
server->nick_alternate_number = 0;
snprintf (nick, sizeof (nick), "%s", server->nicks_array[0]);
}
else
snprintf (nick, sizeof (nick), "%s", server->nick);
/* if length is < 9, just add a "_" */
if (strlen (nick) < 9)
{
strcat (nick, "_");
return nick;
}
server->nick_alternate_number++;
/* number is max 99 */
if (server->nick_alternate_number > 99)
return NULL;
/* be sure the nick has 9 chars max */
nick[9] = '\0';
/* generate number */
snprintf (str_number, sizeof (str_number),
"%d", server->nick_alternate_number);
/* copy number in nick */
length_nick = strlen (nick);
length_number = strlen (str_number);
if (length_number > length_nick)
return NULL;
memcpy (nick + length_nick - length_number, str_number, length_number);
/* return alternate nick */
return nick;
}
/*
* Gets value of a feature item in "isupport" (copy of IRC message 005).
*
* Returns value of feature (empty string if feature has no value, NULL if
* feature is not found).
*/
const char *
irc_server_get_isupport_value (struct t_irc_server *server, const char *feature)
{
char feature2[64], *pos_feature, *pos_equal, *pos_space;
int length;
static char value[256];
if (!server || !server->isupport || !feature)
return NULL;
/* search feature with value */
snprintf (feature2, sizeof (feature2), " %s=", feature);
pos_feature = strstr (server->isupport, feature2);
if (pos_feature)
{
/* feature found with value, return value */
pos_feature++;
pos_equal = strchr (pos_feature, '=');
pos_space = strchr (pos_feature, ' ');
if (pos_space)
length = pos_space - pos_equal - 1;
else
length = strlen (pos_equal) + 1;
if (length > (int)sizeof (value) - 1)
length = (int)sizeof (value) - 1;
memcpy (value, pos_equal + 1, length);
value[length] = '\0';
return value;
}
/* search feature without value */
feature2[strlen (feature2) - 1] = ' ';
pos_feature = strstr (server->isupport, feature2);
if (pos_feature)
{
value[0] = '\0';
return value;
}
/* feature not found in isupport */
return NULL;
}
/*
* Sets "prefix_modes" and "prefix_chars" in server using value of PREFIX in IRC
* message 005.
*
* For example, if prefix is "(ohv)@%+":
* prefix_modes is set to "ohv"
* prefix_chars is set to "@%+".
*/
void
irc_server_set_prefix_modes_chars (struct t_irc_server *server,
const char *prefix)
{
char *pos;
int i, old_length_chars, length_modes, length_chars;
if (!server || !prefix)
return;
old_length_chars = (server->prefix_chars) ?
strlen (server->prefix_chars) : 0;
/* free previous values */
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
/* assign new values */
pos = strchr (prefix, ')');
if (pos)
{
server->prefix_modes = weechat_strndup (prefix + 1,
pos - prefix - 1);
if (server->prefix_modes)
{
pos++;
length_modes = strlen (server->prefix_modes);
length_chars = strlen (pos);
server->prefix_chars = malloc (length_modes + 1);
if (server->prefix_chars)
{
for (i = 0; i < length_modes; i++)
{
server->prefix_chars[i] = (i < length_chars) ? pos[i] : ' ';
}
server->prefix_chars[length_modes] = '\0';
}
else
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
}
}
length_chars = (server->prefix_chars) ? strlen (server->prefix_chars) : 0;
if (server->prefix_chars && (length_chars != old_length_chars))
irc_nick_realloc_prefixes (server, old_length_chars, length_chars);
}
/*
* Sets lag in server buffer (local variable), update bar item "lag"
* and send signal "irc_server_lag_changed" for the server.
*/
void
irc_server_set_lag (struct t_irc_server *server)
{
char str_lag[32];
if (server->lag >= weechat_config_integer (irc_config_network_lag_min_show))
{
snprintf (str_lag, sizeof (str_lag),
((server->lag_check_time.tv_sec == 0) || (server->lag < 1000)) ?
"%.3f" : "%.0f",
((float)(server->lag)) / 1000);
weechat_buffer_set (server->buffer, "localvar_set_lag", str_lag);
}
else
{
weechat_buffer_set (server->buffer, "localvar_del_lag", "");
}
weechat_hook_signal_send ("irc_server_lag_changed",
WEECHAT_HOOK_SIGNAL_STRING,
server->name);
weechat_bar_item_update ("lag");
}
/*
* Gets prefix_modes for server (for example: "ohv").
*
* Returns default modes if prefix_modes is not set in server.
*/
const char *
irc_server_get_prefix_modes (struct t_irc_server *server)
{
return (server && server->prefix_modes) ?
server->prefix_modes : irc_server_prefix_modes_default;
}
/*
* Gets prefix_chars for server (for example: "@%+").
*
* Returns default chars if prefix_chars is not set in server.
*/
const char *
irc_server_get_prefix_chars (struct t_irc_server *server)
{
return (server && server->prefix_chars) ?
server->prefix_chars : irc_server_prefix_chars_default;
}
/*
* Gets index of mode in prefix_modes.
*
* The mode is for example 'o' or 'v'.
*
* Returns -1 if mode does not exist in server.
*/
int
irc_server_get_prefix_mode_index (struct t_irc_server *server, char mode)
{
const char *prefix_modes;
char *pos;
if (server)
{
prefix_modes = irc_server_get_prefix_modes (server);
pos = strchr (prefix_modes, mode);
if (pos)
return pos - prefix_modes;
}
return -1;
}
/*
* Gets index of prefix_char in prefix_chars.
*
* The prefix char is for example '@' or '+'.
*
* Returns -1 if prefix_char does not exist in server.
*/
int
irc_server_get_prefix_char_index (struct t_irc_server *server,
char prefix_char)
{
const char *prefix_chars;
char *pos;
if (server)
{
prefix_chars = irc_server_get_prefix_chars (server);
pos = strchr (prefix_chars, prefix_char);
if (pos)
return pos - prefix_chars;
}
return -1;
}
/*
* Gets mode for prefix char.
*
* For example prefix_char '@' can return 'o'.
*
* Returns ' ' (space) if prefix char is not found.
*/
char
irc_server_get_prefix_mode_for_char (struct t_irc_server *server,
char prefix_char)
{
const char *prefix_modes;
int index;
if (server)
{
prefix_modes = irc_server_get_prefix_modes (server);
index = irc_server_get_prefix_char_index (server, prefix_char);
if (index >= 0)
return prefix_modes[index];
}
return ' ';
}
/*
* Gets prefix char for mode.
*
* For example mode 'o' can return '@'.
*
* Returns a space if mode is not found.
*/
char
irc_server_get_prefix_char_for_mode (struct t_irc_server *server, char mode)
{
const char *prefix_chars;
int index;
if (server)
{
prefix_chars = irc_server_get_prefix_chars (server);
index = irc_server_get_prefix_mode_index (server, mode);
if (index >= 0)
return prefix_chars[index];
}
return ' ';
}
/*
* Gets chanmodes for server (for example: "eIb,k,l,imnpstS").
*
* Returns default chanmodes if chanmodes is not set in server.
*/
const char *
irc_server_get_chanmodes (struct t_irc_server *server)
{
return (server && server->chanmodes) ?
server->chanmodes : irc_server_chanmodes_default;
}
/*
* Checks if a prefix char is valid for a status message
* (message sent for example to ops/voiced).
*
* The prefix (for example '@' or '+') must be in STATUSMSG,
* or in "prefix_chars" if STATUSMSG is not defined.
*
* Returns:
* 1: prefix is valid for a status message
* 0: prefix is NOT valid for a status message
*/
int
irc_server_prefix_char_statusmsg (struct t_irc_server *server,
char prefix_char)
{
const char *support_statusmsg;
support_statusmsg = irc_server_get_isupport_value (server, "STATUSMSG");
if (support_statusmsg)
return (strchr (support_statusmsg, prefix_char)) ? 1 : 0;
return (irc_server_get_prefix_char_index (server, prefix_char) >= 0) ?
1 : 0;
}
/*
* Get max modes supported in one command by the server
* (in isupport value, with the format: "MODES=4").
*
* Default is 4 if the info is not given by the server.
*/
int
irc_server_get_max_modes (struct t_irc_server *server)
{
const char *support_modes;
char *error;
long number;
int max_modes;
max_modes = 4;
support_modes = irc_server_get_isupport_value (server, "MODES");
if (support_modes)
{
error = NULL;
number = strtol (support_modes, &error, 10);
if (error && !error[0])
{
max_modes = number;
if (max_modes < 1)
max_modes = 1;
if (max_modes > 128)
max_modes = 128;
}
}
return max_modes;
}
/*
* Gets an evaluated default_msg server option: replaces "%v" by WeeChat
* version if there's no ${...} in string, or just evaluates the string.
*
* Note: result must be freed after use.
*/
char *
irc_server_get_default_msg (const char *default_msg,
struct t_irc_server *server,
const char *channel_name)
{
char *version;
struct t_hashtable *extra_vars;
char *msg, *res;
/*
* "%v" for version is deprecated since WeeChat 1.6, where
* an expression ${info:version} is preferred, so we replace
* the "%v" with version only if there's no "${...}" in string
*/
if (strstr (default_msg, "%v") && !strstr (default_msg, "${"))
{
version = weechat_info_get ("version", "");
res = weechat_string_replace (default_msg, "%v",
(version) ? version : "");
if (version)
free (version);
return res;
}
extra_vars = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL,
NULL);
if (extra_vars)
{
weechat_hashtable_set (extra_vars, "server", server->name);
weechat_hashtable_set (extra_vars, "channel",
(channel_name) ? channel_name : "");
weechat_hashtable_set (extra_vars, "nick", server->nick);
}
msg = weechat_string_eval_expression (default_msg, NULL, extra_vars, NULL);
if (extra_vars)
weechat_hashtable_free (extra_vars);
return msg;
}
/*
* Allocates a new server and adds it to the servers queue.
*
* Returns pointer to new server, NULL if error.
*/
struct t_irc_server *
irc_server_alloc (const char *name)
{
struct t_irc_server *new_server;
int i, length;
char *option_name;
if (irc_server_casesearch (name))
return NULL;
/* alloc memory for new server */
new_server = malloc (sizeof (*new_server));
if (!new_server)
{
weechat_printf (NULL,
_("%s%s: error when allocating new server"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return NULL;
}
/* add new server to queue */
new_server->prev_server = last_irc_server;
new_server->next_server = NULL;
if (last_irc_server)
last_irc_server->next_server = new_server;
else
irc_servers = new_server;
last_irc_server = new_server;
/* set name */
new_server->name = strdup (name);
/* internal vars */
new_server->temp_server = 0;
new_server->reloading_from_config = 0;
new_server->reloaded_from_config = 0;
new_server->addresses_eval = NULL;
new_server->addresses_count = 0;
new_server->addresses_array = NULL;
new_server->ports_array = NULL;
new_server->retry_array = NULL;
new_server->index_current_address = 0;
new_server->current_address = NULL;
new_server->current_ip = NULL;
new_server->current_port = 0;
new_server->current_retry = 0;
new_server->sock = -1;
new_server->hook_connect = NULL;
new_server->hook_fd = NULL;
new_server->hook_timer_connection = NULL;
new_server->hook_timer_sasl = NULL;
new_server->is_connected = 0;
new_server->ssl_connected = 0;
new_server->disconnected = 0;
new_server->unterminated_message = NULL;
new_server->nicks_count = 0;
new_server->nicks_array = NULL;
new_server->nick_first_tried = 0;
new_server->nick_alternate_number = -1;
new_server->nick = NULL;
new_server->nick_modes = NULL;
new_server->host = NULL;
new_server->checking_cap_ls = 0;
new_server->cap_ls = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL,
NULL);
new_server->checking_cap_list = 0;
new_server->cap_list = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL,
NULL);
new_server->isupport = NULL;
new_server->prefix_modes = NULL;
new_server->prefix_chars = NULL;
new_server->nick_max_length = 0;
new_server->user_max_length = 0;
new_server->host_max_length = 0;
new_server->casemapping = IRC_SERVER_CASEMAPPING_RFC1459;
new_server->chantypes = NULL;
new_server->chanmodes = NULL;
new_server->monitor = 0;
new_server->monitor_time = 0;
new_server->reconnect_delay = 0;
new_server->reconnect_start = 0;
new_server->command_time = 0;
new_server->reconnect_join = 0;
new_server->disable_autojoin = 0;
new_server->is_away = 0;
new_server->away_message = NULL;
new_server->away_time = 0;
new_server->lag = 0;
new_server->lag_displayed = -1;
new_server->lag_check_time.tv_sec = 0;
new_server->lag_check_time.tv_usec = 0;
new_server->lag_next_check = time (NULL) +
weechat_config_integer (irc_config_network_lag_check);
new_server->lag_last_refresh = 0;
new_server->cmd_list_regexp = NULL;
new_server->last_user_message = 0;
new_server->last_away_check = 0;
new_server->last_data_purge = 0;
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
new_server->outqueue[i] = NULL;
new_server->last_outqueue[i] = NULL;
}
new_server->redirects = NULL;
new_server->last_redirect = NULL;
new_server->notify_list = NULL;
new_server->last_notify = NULL;
new_server->notify_count = 0;
new_server->join_manual = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_TIME,
NULL, NULL);
new_server->join_channel_key = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL, NULL);
new_server->join_noswitch = weechat_hashtable_new (
32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_TIME,
NULL, NULL);
new_server->buffer = NULL;
new_server->buffer_as_string = NULL;
new_server->channels = NULL;
new_server->last_channel = NULL;
/* create options with null value */
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
length = strlen (new_server->name) + 1 +
strlen (irc_server_options[i][0]) +
512 + /* inherited option name (irc.server_default.xxx) */
1;
option_name = malloc (length);
if (option_name)
{
snprintf (option_name, length, "%s.%s << irc.server_default.%s",
new_server->name,
irc_server_options[i][0],
irc_server_options[i][0]);
new_server->options[i] = irc_config_server_new_option (
irc_config_file,
irc_config_section_server,
i,
option_name,
NULL,
NULL,
1,
&irc_config_server_check_value_cb,
irc_server_options[i][0],
NULL,
&irc_config_server_change_cb,
irc_server_options[i][0],
NULL);
irc_config_server_change_cb (irc_server_options[i][0], NULL,
new_server->options[i]);
free (option_name);
}
}
return new_server;
}
/*
* Initializes a server with URL of this form: irc://nick:pass@irc.toto.org:6667
*
* Returns pointer to new server, NULL if error.
*/
struct t_irc_server *
irc_server_alloc_with_url (const char *irc_url)
{
char *irc_url2, *pos_server, *pos_nick, *pos_password;
char *pos_address, *pos_port, *pos_channel, *pos;
char *server_address, *server_nicks, *server_autojoin;
char default_port[16];
int ipv6, ssl, length;
struct t_irc_server *ptr_server;
irc_url2 = strdup (irc_url);
if (!irc_url2)
return NULL;
pos_server = NULL;
pos_nick = NULL;
pos_password = NULL;
pos_address = NULL;
pos_port = NULL;
pos_channel = NULL;
ipv6 = 0;
ssl = 0;
snprintf (default_port, sizeof (default_port),
"%d", IRC_SERVER_DEFAULT_PORT);
pos_server = strstr (irc_url2, "://");
if (!pos_server || !pos_server[3])
{
free (irc_url2);
return NULL;
}
pos_server[0] = '\0';
pos_server += 3;
pos_channel = strstr (pos_server, "/");
if (pos_channel)
{
pos_channel[0] = '\0';
pos_channel++;
while (pos_channel[0] == '/')
{
pos_channel++;
}
}
/* check for SSL / IPv6 */
if (weechat_strcasecmp (irc_url2, "irc6") == 0)
{
ipv6 = 1;
}
else if (weechat_strcasecmp (irc_url2, "ircs") == 0)
{
ssl = 1;
}
else if ((weechat_strcasecmp (irc_url2, "irc6s") == 0)
|| (weechat_strcasecmp (irc_url2, "ircs6") == 0))
{
ipv6 = 1;
ssl = 1;
}
if (ssl)
{
snprintf (default_port, sizeof (default_port),
"%d", IRC_SERVER_DEFAULT_PORT_SSL);
}
/* search for nick, password, address+port */
pos_address = strchr (pos_server, '@');
if (pos_address)
{
pos_address[0] = '\0';
pos_address++;
pos_nick = pos_server;
pos_password = strchr (pos_server, ':');
if (pos_password)
{
pos_password[0] = '\0';
pos_password++;
}
}
else
pos_address = pos_server;
/*
* search for port in address, and skip optional [ ] around address
* (can be used to indicate IPv6 port, after ']')
*/
if (pos_address[0] == '[')
{
pos_address++;
pos = strchr (pos_address, ']');
if (!pos)
{
free (irc_url2);
return NULL;
}
pos[0] = '\0';
pos++;
pos_port = strchr (pos, ':');
if (pos_port)
{
pos_port[0] = '\0';
pos_port++;
}
}
else
{
pos_port = strchr (pos_address, ':');
if (pos_port)
{
pos_port[0] = '\0';
pos_port++;
}
}
ptr_server = irc_server_alloc (pos_address);
if (ptr_server)
{
ptr_server->temp_server = 1;
if (pos_address && pos_address[0])
{
length = strlen (pos_address) + 1 +
((pos_port) ? strlen (pos_port) : 16) + 1;
server_address = malloc (length);
if (server_address)
{
snprintf (server_address, length,
"%s/%s",
pos_address,
(pos_port && pos_port[0]) ? pos_port : default_port);
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_ADDRESSES],
server_address,
1);
free (server_address);
}
}
weechat_config_option_set (ptr_server->options[IRC_SERVER_OPTION_IPV6],
(ipv6) ? "on" : "off",
1);
weechat_config_option_set (ptr_server->options[IRC_SERVER_OPTION_SSL],
(ssl) ? "on" : "off",
1);
if (pos_nick && pos_nick[0])
{
length = ((strlen (pos_nick) + 2) * 5) + 1;
server_nicks = malloc (length);
if (server_nicks)
{
snprintf (server_nicks, length,
"%s,%s1,%s2,%s3,%s4",
pos_nick, pos_nick, pos_nick, pos_nick, pos_nick);
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_NICKS],
server_nicks,
1);
free (server_nicks);
}
}
if (pos_password && pos_password[0])
{
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_PASSWORD],
pos_password,
1);
}
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_AUTOCONNECT],
"on",
1);
/* autojoin */
if (pos_channel && pos_channel[0])
{
if (irc_channel_is_channel (ptr_server, pos_channel))
server_autojoin = strdup (pos_channel);
else
{
server_autojoin = malloc (strlen (pos_channel) + 2);
if (server_autojoin)
{
strcpy (server_autojoin, "#");
strcat (server_autojoin, pos_channel);
}
}
if (server_autojoin)
{
weechat_config_option_set (
ptr_server->options[IRC_SERVER_OPTION_AUTOJOIN],
server_autojoin,
1);
free (server_autojoin);
}
}
}
free (irc_url2);
return ptr_server;
}
/*
* Applies command line options to a server.
*
* For example: -ssl -nossl -password=test -proxy=myproxy
*/
void
irc_server_apply_command_line_options (struct t_irc_server *server,
int argc, char **argv)
{
int i, index_option;
char *pos, *option_name, *ptr_value, *value_boolean[2] = { "off", "on" };
for (i = 0; i < argc; i++)
{
if (argv[i][0] == '-')
{
pos = strchr (argv[i], '=');
if (pos)
{
option_name = weechat_strndup (argv[i] + 1, pos - argv[i] - 1);
ptr_value = pos + 1;
}
else
{
option_name = strdup (argv[i] + 1);
ptr_value = value_boolean[1];
}
if (option_name)
{
if (weechat_strcasecmp (option_name, "temp") == 0)
{
/* temporary server, not saved */
server->temp_server = 1;
}
else
{
index_option = irc_server_search_option (option_name);
if (index_option < 0)
{
/* look if option is negative, like "-noxxx" */
if (weechat_strncasecmp (argv[i], "-no", 3) == 0)
{
free (option_name);
option_name = strdup (argv[i] + 3);
index_option = irc_server_search_option (option_name);
ptr_value = value_boolean[0];
}
}
if (index_option >= 0)
{
weechat_config_option_set (server->options[index_option],
ptr_value, 1);
}
}
free (option_name);
}
}
}
}
/*
* Adds a message in out queue.
*/
void
irc_server_outqueue_add (struct t_irc_server *server, int priority,
const char *command, const char *msg1,
const char *msg2, int modified, const char *tags,
struct t_irc_redirect *redirect)
{
struct t_irc_outqueue *new_outqueue;
new_outqueue = malloc (sizeof (*new_outqueue));
if (new_outqueue)
{
new_outqueue->command = (command) ? strdup (command) : strdup ("unknown");
new_outqueue->message_before_mod = (msg1) ? strdup (msg1) : NULL;
new_outqueue->message_after_mod = (msg2) ? strdup (msg2) : NULL;
new_outqueue->modified = modified;
new_outqueue->tags = (tags) ? strdup (tags) : NULL;
new_outqueue->redirect = redirect;
new_outqueue->prev_outqueue = server->last_outqueue[priority];
new_outqueue->next_outqueue = NULL;
if (server->last_outqueue[priority])
server->last_outqueue[priority]->next_outqueue = new_outqueue;
else
server->outqueue[priority] = new_outqueue;
server->last_outqueue[priority] = new_outqueue;
}
}
/*
* Frees a message in out queue.
*/
void
irc_server_outqueue_free (struct t_irc_server *server,
int priority,
struct t_irc_outqueue *outqueue)
{
struct t_irc_outqueue *new_outqueue;
if (!server || !outqueue)
return;
/* remove outqueue message */
if (server->last_outqueue[priority] == outqueue)
server->last_outqueue[priority] = outqueue->prev_outqueue;
if (outqueue->prev_outqueue)
{
(outqueue->prev_outqueue)->next_outqueue = outqueue->next_outqueue;
new_outqueue = server->outqueue[priority];
}
else
new_outqueue = outqueue->next_outqueue;
if (outqueue->next_outqueue)
(outqueue->next_outqueue)->prev_outqueue = outqueue->prev_outqueue;
/* free data */
if (outqueue->command)
free (outqueue->command);
if (outqueue->message_before_mod)
free (outqueue->message_before_mod);
if (outqueue->message_after_mod)
free (outqueue->message_after_mod);
if (outqueue->tags)
free (outqueue->tags);
free (outqueue);
/* set new head */
server->outqueue[priority] = new_outqueue;
}
/*
* Frees all messages in out queue.
*/
void
irc_server_outqueue_free_all (struct t_irc_server *server, int priority)
{
while (server->outqueue[priority])
{
irc_server_outqueue_free (server, priority,
server->outqueue[priority]);
}
}
/*
* Frees server data.
*/
void
irc_server_free_data (struct t_irc_server *server)
{
int i;
if (!server)
return;
/* free linked lists */
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
irc_server_outqueue_free_all (server, i);
}
irc_redirect_free_all (server);
irc_notify_free_all (server);
irc_channel_free_all (server);
/* free hashtables */
weechat_hashtable_free (server->join_manual);
weechat_hashtable_free (server->join_channel_key);
weechat_hashtable_free (server->join_noswitch);
/* free server data */
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
if (server->options[i])
weechat_config_option_free (server->options[i]);
}
if (server->name)
free (server->name);
if (server->addresses_eval)
free (server->addresses_eval);
if (server->addresses_array)
weechat_string_free_split (server->addresses_array);
if (server->ports_array)
free (server->ports_array);
if (server->retry_array)
free (server->retry_array);
if (server->current_address)
free (server->current_address);
if (server->current_ip)
free (server->current_ip);
if (server->hook_connect)
weechat_unhook (server->hook_connect);
if (server->hook_fd)
weechat_unhook (server->hook_fd);
if (server->hook_timer_connection)
weechat_unhook (server->hook_timer_connection);
if (server->hook_timer_sasl)
weechat_unhook (server->hook_timer_sasl);
if (server->unterminated_message)
free (server->unterminated_message);
if (server->nicks_array)
weechat_string_free_split (server->nicks_array);
if (server->nick)
free (server->nick);
if (server->nick_modes)
free (server->nick_modes);
if (server->host)
free (server->host);
if (server->cap_ls)
weechat_hashtable_free (server->cap_ls);
if (server->cap_list)
weechat_hashtable_free (server->cap_list);
if (server->isupport)
free (server->isupport);
if (server->prefix_modes)
free (server->prefix_modes);
if (server->prefix_chars)
free (server->prefix_chars);
if (server->chantypes)
free (server->chantypes);
if (server->chanmodes)
free (server->chanmodes);
if (server->away_message)
free (server->away_message);
if (server->cmd_list_regexp)
{
regfree (server->cmd_list_regexp);
free (server->cmd_list_regexp);
}
if (server->buffer_as_string)
free (server->buffer_as_string);
}
/*
* Frees a server and remove it from list of servers.
*/
void
irc_server_free (struct t_irc_server *server)
{
struct t_irc_server *new_irc_servers;
if (!server)
return;
/*
* close server buffer (and all channels/privates)
* (only if we are not in a /upgrade, because during upgrade we want to
* keep connections and closing server buffer would disconnect from server)
*/
if (server->buffer && !irc_signal_upgrade_received)
weechat_buffer_close (server->buffer);
/* remove server from queue */
if (last_irc_server == server)
last_irc_server = server->prev_server;
if (server->prev_server)
{
(server->prev_server)->next_server = server->next_server;
new_irc_servers = irc_servers;
}
else
new_irc_servers = server->next_server;
if (server->next_server)
(server->next_server)->prev_server = server->prev_server;
irc_server_free_data (server);
free (server);
irc_servers = new_irc_servers;
}
/*
* Frees all servers.
*/
void
irc_server_free_all ()
{
/* for each server in memory, remove it */
while (irc_servers)
{
irc_server_free (irc_servers);
}
}
/*
* Copies a server.
*
* Returns pointer to new server, NULL if error.
*/
struct t_irc_server *
irc_server_copy (struct t_irc_server *server, const char *new_name)
{
struct t_irc_server *new_server;
struct t_infolist *infolist;
char *mask, *pos;
const char *option_name;
int length, index_option;
/* check if another server exists with this name */
if (irc_server_casesearch (new_name))
return NULL;
new_server = irc_server_alloc (new_name);
if (new_server)
{
/* duplicate options */
length = 32 + strlen (server->name) + 1;
mask = malloc (length);
if (!mask)
return 0;
snprintf (mask, length, "irc.server.%s.*", server->name);
infolist = weechat_infolist_get ("option", NULL, mask);
free (mask);
if (infolist)
{
while (weechat_infolist_next (infolist))
{
if (!weechat_infolist_integer (infolist, "value_is_null"))
{
option_name = weechat_infolist_string (infolist,
"option_name");
pos = strrchr (option_name, '.');
if (pos)
{
index_option = irc_server_search_option (pos + 1);
if (index_option >= 0)
{
weechat_config_option_set (
new_server->options[index_option],
weechat_infolist_string (infolist, "value"),
1);
}
}
}
}
weechat_infolist_free (infolist);
}
}
return new_server;
}
/*
* Renames a server (internal name).
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_rename (struct t_irc_server *server, const char *new_name)
{
int length;
char *mask, *pos_option, *new_option_name, charset_modifier[256];
const char *buffer_name, *option_name;
struct t_infolist *infolist;
struct t_config_option *ptr_option;
struct t_irc_channel *ptr_channel;
/* check if another server exists with this name */
if (irc_server_casesearch (new_name))
return 0;
/* rename options */
length = 32 + strlen (server->name) + 1;
mask = malloc (length);
if (!mask)
return 0;
snprintf (mask, length, "irc.server.%s.*", server->name);
infolist = weechat_infolist_get ("option", NULL, mask);
free (mask);
if (infolist)
{
while (weechat_infolist_next (infolist))
{
ptr_option = weechat_config_get (
weechat_infolist_string (infolist, "full_name"));
if (ptr_option)
{
option_name = weechat_infolist_string (infolist, "option_name");
if (option_name)
{
pos_option = strrchr (option_name, '.');
if (pos_option)
{
pos_option++;
length = strlen (new_name) + 1 + strlen (pos_option) + 1;
new_option_name = malloc (length);
if (new_option_name)
{
snprintf (new_option_name, length,
"%s.%s", new_name, pos_option);
weechat_config_option_rename (ptr_option, new_option_name);
free (new_option_name);
}
}
}
}
}
weechat_infolist_free (infolist);
}
/* rename server */
if (server->name)
free (server->name);
server->name = strdup (new_name);
/* change name and local variables on buffers */
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->buffer)
{
buffer_name = irc_buffer_build_name (server->name,
ptr_channel->name);
weechat_buffer_set (ptr_channel->buffer, "name", buffer_name);
weechat_buffer_set (ptr_channel->buffer, "localvar_set_server",
server->name);
}
}
if (server->buffer)
{
buffer_name = irc_buffer_build_name (server->name, NULL);
weechat_buffer_set (server->buffer, "name", buffer_name);
weechat_buffer_set (server->buffer, "short_name", server->name);
weechat_buffer_set (server->buffer, "localvar_set_server",
server->name);
weechat_buffer_set (server->buffer, "localvar_set_channel",
server->name);
snprintf (charset_modifier, sizeof (charset_modifier),
"irc.%s", server->name);
weechat_buffer_set (server->buffer, "localvar_set_charset_modifier",
charset_modifier);
}
return 1;
}
/*
* Reorders list of servers.
*
* Returns the number of servers moved in the list (>= 0).
*/
int
irc_server_reorder (const char **servers, int num_servers)
{
struct t_irc_server *ptr_server, *ptr_server2;
int i, num_moved;
ptr_server = irc_servers;
num_moved = 0;
for (i = 0; ptr_server && (i < num_servers); i++)
{
for (ptr_server2 = ptr_server; ptr_server2;
ptr_server2 = ptr_server2->next_server)
{
if (strcmp (ptr_server2->name, servers[i]) == 0)
break;
}
if (ptr_server2 == ptr_server)
{
ptr_server = ptr_server->next_server;
}
else if (ptr_server2)
{
/* extract server from list */
if (ptr_server2 == irc_servers)
irc_servers = ptr_server2->next_server;
if (ptr_server2 == last_irc_server)
last_irc_server = ptr_server2->prev_server;
if (ptr_server2->prev_server)
(ptr_server2->prev_server)->next_server = ptr_server2->next_server;
if (ptr_server2->next_server)
(ptr_server2->next_server)->prev_server = ptr_server2->prev_server;
/* set pointers in ptr_server2 */
ptr_server2->prev_server = ptr_server->prev_server;
ptr_server2->next_server = ptr_server;
/* insert ptr_server2 before ptr_server */
if (ptr_server->prev_server)
(ptr_server->prev_server)->next_server = ptr_server2;
ptr_server->prev_server = ptr_server2;
/* adjust list of servers if needed */
if (ptr_server == irc_servers)
irc_servers = ptr_server2;
num_moved++;
}
}
return num_moved;
}
/*
* Sends a signal for an IRC message (received or sent).
*/
void
irc_server_send_signal (struct t_irc_server *server, const char *signal,
const char *command, const char *full_message,
const char *tags)
{
int length;
char *str_signal, *full_message_tags;
length = strlen (server->name) + 1 + strlen (signal) + 1 + strlen (command) + 1;
str_signal = malloc (length);
if (str_signal)
{
snprintf (str_signal, length,
"%s,%s_%s", server->name, signal, command);
if (tags)
{
length = strlen (tags) + 1 + strlen (full_message) + 1;
full_message_tags = malloc (length);
if (full_message_tags)
{
snprintf (full_message_tags, length,
"%s;%s", tags, full_message);
(void) weechat_hook_signal_send (str_signal,
WEECHAT_HOOK_SIGNAL_STRING,
(void *)full_message_tags);
free (full_message_tags);
}
}
else
{
(void) weechat_hook_signal_send (str_signal,
WEECHAT_HOOK_SIGNAL_STRING,
(void *)full_message);
}
free (str_signal);
}
}
/*
* Sends data to IRC server.
*
* Returns number of bytes sent, -1 if error.
*/
int
irc_server_send (struct t_irc_server *server, const char *buffer, int size_buf)
{
int rc;
if (!server)
{
weechat_printf (
NULL,
_("%s%s: sending data to server: null pointer (please report "
"problem to developers)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return 0;
}
if (size_buf <= 0)
{
weechat_printf (
server->buffer,
_("%s%s: sending data to server: empty buffer (please report "
"problem to developers)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return 0;
}
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
rc = gnutls_record_send (server->gnutls_sess, buffer, size_buf);
else
#endif /* HAVE_GNUTLS */
rc = send (server->sock, buffer, size_buf, 0);
if (rc < 0)
{
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
{
weechat_printf (
server->buffer,
_("%s%s: sending data to server: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
rc, gnutls_strerror (rc));
}
else
#endif /* HAVE_GNUTLS */
{
weechat_printf (
server->buffer,
_("%s%s: sending data to server: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
errno, strerror (errno));
}
}
return rc;
}
/*
* Sets default tags used when sending message.
*/
void
irc_server_set_send_default_tags (const char *tags)
{
irc_server_send_default_tags = tags;
}
/*
* Gets tags to send by concatenation of tags and irc_server_send_default_tags
* (if set).
*
* Note: result must be freed after use.
*/
char *
irc_server_get_tags_to_send (const char *tags)
{
int length;
char *buf;
if (!tags && !irc_server_send_default_tags)
return NULL;
if (!tags)
return strdup (irc_server_send_default_tags);
if (!irc_server_send_default_tags)
return strdup (tags);
/* concatenate tags and irc_server_send_default_tags */
length = strlen (tags) + 1 + strlen (irc_server_send_default_tags) + 1;
buf = malloc (length);
if (buf)
snprintf (buf, length, "%s,%s", tags, irc_server_send_default_tags);
return buf;
}
/*
* Sends a message from out queue.
*/
void
irc_server_outqueue_send (struct t_irc_server *server)
{
time_t time_now;
char *pos, *tags_to_send;
int priority, anti_flood;
time_now = time (NULL);
/* detect if system clock has been changed (now lower than before) */
if (server->last_user_message > time_now)
server->last_user_message = time_now;
for (priority = 0; priority < IRC_SERVER_NUM_OUTQUEUES_PRIO; priority++)
{
switch (priority)
{
case 0:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH);
break;
default:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW);
break;
}
if (server->outqueue[priority]
&& (time_now >= server->last_user_message + anti_flood))
{
if (server->outqueue[priority]->message_before_mod)
{
pos = strchr (server->outqueue[priority]->message_before_mod,
'\r');
if (pos)
pos[0] = '\0';
irc_raw_print (server, IRC_RAW_FLAG_SEND,
server->outqueue[priority]->message_before_mod);
if (pos)
pos[0] = '\r';
}
if (server->outqueue[priority]->message_after_mod)
{
pos = strchr (server->outqueue[priority]->message_after_mod,
'\r');
if (pos)
pos[0] = '\0';
irc_raw_print (server, IRC_RAW_FLAG_SEND |
((server->outqueue[priority]->modified) ? IRC_RAW_FLAG_MODIFIED : 0),
server->outqueue[priority]->message_after_mod);
if (pos)
pos[0] = '\r';
/* send signal with command that will be sent to server */
irc_server_send_signal (
server, "irc_out",
server->outqueue[priority]->command,
server->outqueue[priority]->message_after_mod,
NULL);
tags_to_send = irc_server_get_tags_to_send (
server->outqueue[priority]->tags);
irc_server_send_signal (
server, "irc_outtags",
server->outqueue[priority]->command,
server->outqueue[priority]->message_after_mod,
(tags_to_send) ? tags_to_send : "");
if (tags_to_send)
free (tags_to_send);
/* send command */
irc_server_send (
server, server->outqueue[priority]->message_after_mod,
strlen (server->outqueue[priority]->message_after_mod));
server->last_user_message = time_now;
/* start redirection if redirect is set */
if (server->outqueue[priority]->redirect)
{
irc_redirect_init_command (
server->outqueue[priority]->redirect,
server->outqueue[priority]->message_after_mod);
}
}
irc_server_outqueue_free (server, priority,
server->outqueue[priority]);
break;
}
}
}
/*
* Sends one message to IRC server.
*
* If flag contains outqueue priority value, then messages are in a queue and
* sent slowly (to be sure there will not be any "excess flood"), value of
* queue_msg is priority:
* 1 = higher priority, for user messages
* 2 = lower priority, for other messages (like auto reply to CTCP queries)
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_send_one_msg (struct t_irc_server *server, int flags,
const char *message, const char *nick,
const char *command, const char *channel,
const char *tags)
{
static char buffer[4096];
const char *ptr_msg, *ptr_chan_nick;
char *new_msg, *pos, *tags_to_send, *msg_encoded;
char str_modifier[128], modifier_data[256];
int rc, queue_msg, add_to_queue, first_message, anti_flood;
int pos_channel, pos_text, pos_encode;
time_t time_now;
struct t_irc_redirect *ptr_redirect;
rc = 1;
/* run modifier "irc_out_xxx" */
snprintf (str_modifier, sizeof (str_modifier),
"irc_out_%s",
(command) ? command : "unknown");
new_msg = weechat_hook_modifier_exec (str_modifier,
server->name,
message);
/* no changes in new message */
if (new_msg && (strcmp (message, new_msg) == 0))
{
free (new_msg);
new_msg = NULL;
}
/* message not dropped? */
if (!new_msg || new_msg[0])
{
first_message = 1;
ptr_msg = (new_msg) ? new_msg : message;
msg_encoded = NULL;
irc_message_parse (server, ptr_msg, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, &pos_channel,
&pos_text);
switch (IRC_SERVER_OPTION_INTEGER(server,
IRC_SERVER_OPTION_CHARSET_MESSAGE))
{
case IRC_SERVER_CHARSET_MESSAGE_MESSAGE:
pos_encode = 0;
break;
case IRC_SERVER_CHARSET_MESSAGE_CHANNEL:
pos_encode = (pos_channel >= 0) ? pos_channel : pos_text;
break;
case IRC_SERVER_CHARSET_MESSAGE_TEXT:
pos_encode = pos_text;
break;
default:
pos_encode = 0;
break;
}
if (pos_encode >= 0)
{
ptr_chan_nick = (channel) ? channel : nick;
if (ptr_chan_nick)
{
snprintf (modifier_data, sizeof (modifier_data),
"%s.%s.%s",
weechat_plugin->name,
server->name,
ptr_chan_nick);
}
else
{
snprintf (modifier_data, sizeof (modifier_data),
"%s.%s",
weechat_plugin->name,
server->name);
}
msg_encoded = irc_message_convert_charset (ptr_msg, pos_encode,
"charset_encode",
modifier_data);
}
if (msg_encoded)
ptr_msg = msg_encoded;
while (rc && ptr_msg && ptr_msg[0])
{
pos = strchr (ptr_msg, '\n');
if (pos)
pos[0] = '\0';
snprintf (buffer, sizeof (buffer), "%s\r\n", ptr_msg);
/* anti-flood: look whether we should queue outgoing message or not */
time_now = time (NULL);
/* detect if system clock has been changed (now lower than before) */
if (server->last_user_message > time_now)
server->last_user_message = time_now;
/* get queue from flags */
queue_msg = 0;
if (flags & IRC_SERVER_SEND_OUTQ_PRIO_HIGH)
queue_msg = 1;
else if (flags & IRC_SERVER_SEND_OUTQ_PRIO_LOW)
queue_msg = 2;
switch (queue_msg - 1)
{
case 0:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH);
break;
default:
anti_flood = IRC_SERVER_OPTION_INTEGER(
server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW);
break;
}
add_to_queue = 0;
if ((queue_msg > 0)
&& (server->outqueue[queue_msg - 1]
|| ((anti_flood > 0)
&& (time_now - server->last_user_message < anti_flood))))
{
add_to_queue = queue_msg;
}
tags_to_send = irc_server_get_tags_to_send (tags);
ptr_redirect = irc_redirect_search_available (server);
if (add_to_queue > 0)
{
/* queue message (do not send anything now) */
irc_server_outqueue_add (server, add_to_queue - 1, command,
(new_msg && first_message) ? message : NULL,
buffer,
(new_msg) ? 1 : 0,
tags_to_send,
ptr_redirect);
/* mark redirect as "used" */
if (ptr_redirect)
ptr_redirect->assigned_to_command = 1;
}
else
{
if (first_message)
{
irc_raw_print (server, IRC_RAW_FLAG_SEND, message);
}
if (new_msg)
{
irc_raw_print (server,
IRC_RAW_FLAG_SEND | IRC_RAW_FLAG_MODIFIED,
ptr_msg);
}
/* send signal with command that will be sent to server */
irc_server_send_signal (server, "irc_out",
(command) ? command : "unknown",
ptr_msg,
NULL);
irc_server_send_signal (server, "irc_outtags",
(command) ? command : "unknown",
ptr_msg,
(tags_to_send) ? tags_to_send : "");
if (irc_server_send (server, buffer, strlen (buffer)) <= 0)
rc = 0;
else
{
if (queue_msg > 0)
server->last_user_message = time_now;
}
if (ptr_redirect)
irc_redirect_init_command (ptr_redirect, buffer);
}
if (tags_to_send)
free (tags_to_send);
if (pos)
{
pos[0] = '\n';
ptr_msg = pos + 1;
}
else
ptr_msg = NULL;
first_message = 0;
}
if (msg_encoded)
free (msg_encoded);
}
else
{
irc_raw_print (server, IRC_RAW_FLAG_SEND | IRC_RAW_FLAG_MODIFIED,
_("(message dropped)"));
}
if (new_msg)
free (new_msg);
return rc;
}
/*
* Sends formatted data to IRC server.
*
* Many messages may be sent, separated by '\n'.
*
* If flags contains "IRC_SERVER_SEND_RETURN_HASHTABLE", then a hashtable with
* split of message is returned (see function irc_message_split() in
* irc-message.c)
*
* Note: hashtable must be freed after use.
*/
struct t_hashtable *
irc_server_sendf (struct t_irc_server *server, int flags, const char *tags,
const char *format, ...)
{
char **items, hash_key[32], value[32], *nick, *command, *channel, *new_msg;
char str_modifier[128];
const char *str_message, *str_args;
int i, items_count, number, ret_number, rc;
struct t_hashtable *hashtable, *ret_hashtable;
if (!server)
return NULL;
weechat_va_format (format);
if (!vbuffer)
return NULL;
ret_hashtable = NULL;
ret_number = 1;
if (flags & IRC_SERVER_SEND_RETURN_HASHTABLE)
{
ret_hashtable = weechat_hashtable_new (32,
WEECHAT_HASHTABLE_STRING,
WEECHAT_HASHTABLE_STRING,
NULL, NULL);
}
rc = 1;
items = weechat_string_split (vbuffer, "\n", NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0, &items_count);
for (i = 0; i < items_count; i++)
{
/* run modifier "irc_out1_xxx" (like "irc_out_xxx", but before split) */
irc_message_parse (server, items[i], NULL, NULL,
&nick, NULL, NULL, &command, &channel, NULL, NULL,
NULL, NULL, NULL, NULL);
snprintf (str_modifier, sizeof (str_modifier),
"irc_out1_%s",
(command) ? command : "unknown");
new_msg = weechat_hook_modifier_exec (str_modifier,
server->name,
items[i]);
/* no changes in new message */
if (new_msg && (strcmp (items[i], new_msg) == 0))
{
free (new_msg);
new_msg = NULL;
}
/* message not dropped? */
if (!new_msg || new_msg[0])
{
/* send signal with command that will be sent to server (before split) */
irc_server_send_signal (server, "irc_out1",
(command) ? command : "unknown",
(new_msg) ? new_msg : items[i],
NULL);
/*
* split message if needed (max is 512 bytes by default,
* including the final "\r\n")
*/
hashtable = irc_message_split (server,
(new_msg) ? new_msg : items[i]);
if (hashtable)
{
number = 1;
while (1)
{
snprintf (hash_key, sizeof (hash_key), "msg%d", number);
str_message = weechat_hashtable_get (hashtable, hash_key);
if (!str_message)
break;
snprintf (hash_key, sizeof (hash_key), "args%d", number);
str_args = weechat_hashtable_get (hashtable, hash_key);
rc = irc_server_send_one_msg (server, flags, str_message,
nick, command, channel, tags);
if (!rc)
break;
if (ret_hashtable)
{
snprintf (hash_key, sizeof (hash_key),
"msg%d", ret_number);
weechat_hashtable_set (ret_hashtable,
hash_key, str_message);
if (str_args)
{
snprintf (hash_key, sizeof (hash_key),
"args%d", ret_number);
weechat_hashtable_set (ret_hashtable,
hash_key, str_args);
}
ret_number++;
}
number++;
}
if (ret_hashtable)
{
snprintf (value, sizeof (value), "%d", ret_number - 1);
weechat_hashtable_set (ret_hashtable, "count", value);
}
weechat_hashtable_free (hashtable);
if (!rc)
break;
}
}
if (nick)
free (nick);
if (command)
free (command);
if (channel)
free (channel);
if (new_msg)
free (new_msg);
}
if (items)
weechat_string_free_split (items);
free (vbuffer);
return ret_hashtable;
}
/*
* Adds a message to received messages queue (at the end).
*/
void
irc_server_msgq_add_msg (struct t_irc_server *server, const char *msg)
{
struct t_irc_message *message;
if (!server->unterminated_message && !msg[0])
return;
message = malloc (sizeof (*message));
if (!message)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return;
}
message->server = server;
if (server->unterminated_message)
{
message->data = malloc (strlen (server->unterminated_message) +
strlen (msg) + 1);
if (!message->data)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
}
else
{
strcpy (message->data, server->unterminated_message);
strcat (message->data, msg);
}
free (server->unterminated_message);
server->unterminated_message = NULL;
}
else
message->data = strdup (msg);
message->next_message = NULL;
if (irc_msgq_last_msg)
{
irc_msgq_last_msg->next_message = message;
irc_msgq_last_msg = message;
}
else
{
irc_recv_msgq = message;
irc_msgq_last_msg = message;
}
}
/*
* Adds an unterminated message to queue.
*/
void
irc_server_msgq_add_unterminated (struct t_irc_server *server,
const char *string)
{
char *unterminated_message2;
if (!string[0])
return;
if (server->unterminated_message)
{
unterminated_message2 =
realloc (server->unterminated_message,
(strlen (server->unterminated_message) +
strlen (string) + 1));
if (!unterminated_message2)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
free (server->unterminated_message);
server->unterminated_message = NULL;
return;
}
server->unterminated_message = unterminated_message2;
strcat (server->unterminated_message, string);
}
else
{
server->unterminated_message = strdup (string);
if (!server->unterminated_message)
{
weechat_printf (server->buffer,
_("%s%s: not enough memory for received message"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
}
}
}
/*
* Splits received buffer, creating queued messages.
*/
void
irc_server_msgq_add_buffer (struct t_irc_server *server, const char *buffer)
{
char *pos_cr, *pos_lf;
while (buffer[0])
{
pos_cr = strchr (buffer, '\r');
pos_lf = strchr (buffer, '\n');
if (!pos_cr && !pos_lf)
{
/* no CR/LF found => add to unterminated and return */
irc_server_msgq_add_unterminated (server, buffer);
return;
}
if (pos_cr && ((!pos_lf) || (pos_lf > pos_cr)))
{
/* found '\r' first => ignore this char */
pos_cr[0] = '\0';
irc_server_msgq_add_unterminated (server, buffer);
buffer = pos_cr + 1;
}
else
{
/* found: '\n' first => terminate message */
pos_lf[0] = '\0';
irc_server_msgq_add_msg (server, buffer);
buffer = pos_lf + 1;
}
}
}
/*
* Flushes message queue.
*/
void
irc_server_msgq_flush ()
{
struct t_irc_message *next;
char *ptr_data, *new_msg, *new_msg2, *ptr_msg, *ptr_msg2, *pos;
char *nick, *host, *command, *channel, *arguments;
char *msg_decoded, *msg_decoded_without_color;
char str_modifier[128], modifier_data[256];
int pos_channel, pos_text, pos_decode;
while (irc_recv_msgq)
{
if (irc_recv_msgq->data)
{
/* read message only if connection was not lost */
if (irc_recv_msgq->server->sock != -1)
{
ptr_data = irc_recv_msgq->data;
while (ptr_data[0] == ' ')
{
ptr_data++;
}
if (ptr_data[0])
{
irc_raw_print (irc_recv_msgq->server, IRC_RAW_FLAG_RECV,
ptr_data);
irc_message_parse (irc_recv_msgq->server,
ptr_data, NULL, NULL, NULL, NULL, NULL,
&command, NULL, NULL, NULL, NULL, NULL,
NULL, NULL);
snprintf (str_modifier, sizeof (str_modifier),
"irc_in_%s",
(command) ? command : "unknown");
new_msg = weechat_hook_modifier_exec (
str_modifier,
irc_recv_msgq->server->name,
ptr_data);
if (command)
free (command);
/* no changes in new message */
if (new_msg && (strcmp (ptr_data, new_msg) == 0))
{
free (new_msg);
new_msg = NULL;
}
/* message not dropped? */
if (!new_msg || new_msg[0])
{
/* use new message (returned by plugin) */
ptr_msg = (new_msg) ? new_msg : ptr_data;
while (ptr_msg && ptr_msg[0])
{
pos = strchr (ptr_msg, '\n');
if (pos)
pos[0] = '\0';
if (new_msg)
{
irc_raw_print (
irc_recv_msgq->server,
IRC_RAW_FLAG_RECV | IRC_RAW_FLAG_MODIFIED,
ptr_msg);
}
irc_message_parse (irc_recv_msgq->server, ptr_msg,
NULL, NULL, &nick, NULL, &host,
&command, &channel, &arguments,
NULL, NULL, NULL,
&pos_channel, &pos_text);
msg_decoded = NULL;
switch (IRC_SERVER_OPTION_INTEGER(irc_recv_msgq->server,
IRC_SERVER_OPTION_CHARSET_MESSAGE))
{
case IRC_SERVER_CHARSET_MESSAGE_MESSAGE:
pos_decode = 0;
break;
case IRC_SERVER_CHARSET_MESSAGE_CHANNEL:
pos_decode = (pos_channel >= 0) ? pos_channel : pos_text;
break;
case IRC_SERVER_CHARSET_MESSAGE_TEXT:
pos_decode = pos_text;
break;
default:
pos_decode = 0;
break;
}
if (pos_decode >= 0)
{
/* convert charset for message */
if (channel
&& irc_channel_is_channel (irc_recv_msgq->server,
channel))
{
snprintf (modifier_data, sizeof (modifier_data),
"%s.%s.%s",
weechat_plugin->name,
irc_recv_msgq->server->name,
channel);
}
else
{
if (nick && (!host || (strcmp (nick, host) != 0)))
{
snprintf (modifier_data,
sizeof (modifier_data),
"%s.%s.%s",
weechat_plugin->name,
irc_recv_msgq->server->name,
nick);
}
else
{
snprintf (modifier_data,
sizeof (modifier_data),
"%s.%s",
weechat_plugin->name,
irc_recv_msgq->server->name);
}
}
msg_decoded = irc_message_convert_charset (
ptr_msg, pos_decode,
"charset_decode", modifier_data);
}
/* replace WeeChat internal color codes by "?" */
msg_decoded_without_color =
weechat_string_remove_color (
(msg_decoded) ? msg_decoded : ptr_msg,
"?");
/* call modifier after charset */
ptr_msg2 = (msg_decoded_without_color) ?
msg_decoded_without_color : ((msg_decoded) ? msg_decoded : ptr_msg);
snprintf (str_modifier, sizeof (str_modifier),
"irc_in2_%s",
(command) ? command : "unknown");
new_msg2 = weechat_hook_modifier_exec (
str_modifier,
irc_recv_msgq->server->name,
ptr_msg2);
if (new_msg2 && (strcmp (ptr_msg2, new_msg2) == 0))
{
free (new_msg2);
new_msg2 = NULL;
}
/* message not dropped? */
if (!new_msg2 || new_msg2[0])
{
/* use new message (returned by plugin) */
if (new_msg2)
ptr_msg2 = new_msg2;
/* parse and execute command */
if (irc_redirect_message (irc_recv_msgq->server,
ptr_msg2, command,
arguments))
{
/* message redirected, we'll not display it! */
}
else
{
/* message not redirected, display it */
irc_protocol_recv_command (
irc_recv_msgq->server,
ptr_msg2,
command,
channel);
}
}
if (new_msg2)
free (new_msg2);
if (nick)
free (nick);
if (host)
free (host);
if (command)
free (command);
if (channel)
free (channel);
if (arguments)
free (arguments);
if (msg_decoded)
free (msg_decoded);
if (msg_decoded_without_color)
free (msg_decoded_without_color);
if (pos)
{
pos[0] = '\n';
ptr_msg = pos + 1;
}
else
ptr_msg = NULL;
}
}
else
{
irc_raw_print (irc_recv_msgq->server,
IRC_RAW_FLAG_RECV | IRC_RAW_FLAG_MODIFIED,
_("(message dropped)"));
}
if (new_msg)
free (new_msg);
}
}
free (irc_recv_msgq->data);
}
next = irc_recv_msgq->next_message;
free (irc_recv_msgq);
irc_recv_msgq = next;
if (!irc_recv_msgq)
irc_msgq_last_msg = NULL;
}
}
/*
* Receives data from a server.
*/
int
irc_server_recv_cb (const void *pointer, void *data, int fd)
{
struct t_irc_server *server;
static char buffer[4096 + 2];
int num_read, msgq_flush, end_recv;
/* make C compiler happy */
(void) data;
(void) fd;
server = (struct t_irc_server *)pointer;
if (!server)
return WEECHAT_RC_ERROR;
msgq_flush = 0;
end_recv = 0;
while (!end_recv)
{
end_recv = 1;
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
num_read = gnutls_record_recv (server->gnutls_sess, buffer,
sizeof (buffer) - 2);
else
#endif /* HAVE_GNUTLS */
num_read = recv (server->sock, buffer, sizeof (buffer) - 2, 0);
if (num_read > 0)
{
buffer[num_read] = '\0';
irc_server_msgq_add_buffer (server, buffer);
msgq_flush = 1; /* the flush will be done after the loop */
#ifdef HAVE_GNUTLS
if (server->ssl_connected
&& (gnutls_record_check_pending (server->gnutls_sess) > 0))
{
/*
* if there are unread data in the gnutls buffers,
* go on with recv
*/
end_recv = 0;
}
#endif /* HAVE_GNUTLS */
}
else
{
#ifdef HAVE_GNUTLS
if (server->ssl_connected)
{
if ((num_read == 0)
|| ((num_read != GNUTLS_E_AGAIN)
&& (num_read != GNUTLS_E_INTERRUPTED)))
{
weechat_printf (
server->buffer,
_("%s%s: reading data on socket: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
num_read,
(num_read == 0) ? _("(connection closed by peer)") :
gnutls_strerror (num_read));
weechat_printf (
server->buffer,
_("%s%s: disconnecting from server..."),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
irc_server_disconnect (server, !server->is_connected, 1);
}
}
else
#endif /* HAVE_GNUTLS */
{
if ((num_read == 0)
|| ((errno != EAGAIN) && (errno != EWOULDBLOCK)))
{
weechat_printf (
server->buffer,
_("%s%s: reading data on socket: error %d %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
errno,
(num_read == 0) ? _("(connection closed by peer)") :
strerror (errno));
weechat_printf (
server->buffer,
_("%s%s: disconnecting from server..."),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
irc_server_disconnect (server, !server->is_connected, 1);
}
}
}
}
if (msgq_flush)
irc_server_msgq_flush ();
return WEECHAT_RC_OK;
}
/*
* Callback for server connection: it is called if WeeChat is TCP-connected to
* server, but did not receive message 001.
*/
int
irc_server_timer_connection_cb (const void *pointer, void *data,
int remaining_calls)
{
struct t_irc_server *server;
/* make C compiler happy */
(void) data;
(void) remaining_calls;
server = (struct t_irc_server *)pointer;
if (!server)
return WEECHAT_RC_ERROR;
server->hook_timer_connection = NULL;
if (!server->is_connected)
{
weechat_printf (
server->buffer,
_("%s%s: connection timeout (message 001 not received)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
irc_server_disconnect (server, !server->is_connected, 1);
}
return WEECHAT_RC_OK;
}
/*
* Callback for SASL authentication timer: it is called if there is a timeout
* with SASL authentication (if SASL authentication is OK or failed, then hook
* timer is removed before this callback is called).
*/
int
irc_server_timer_sasl_cb (const void *pointer, void *data, int remaining_calls)
{
struct t_irc_server *server;
int sasl_fail;
/* make C compiler happy */
(void) data;
(void) remaining_calls;
server = (struct t_irc_server *)pointer;
if (!server)
return WEECHAT_RC_ERROR;
server->hook_timer_sasl = NULL;
if (!server->is_connected)
{
weechat_printf (server->buffer,
_("%s%s: SASL authentication timeout"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
sasl_fail = IRC_SERVER_OPTION_INTEGER(server,
IRC_SERVER_OPTION_SASL_FAIL);
if ((sasl_fail == IRC_SERVER_SASL_FAIL_RECONNECT)
|| (sasl_fail == IRC_SERVER_SASL_FAIL_DISCONNECT))
{
irc_server_disconnect (
server, 0,
(sasl_fail == IRC_SERVER_SASL_FAIL_RECONNECT) ? 1 : 0);
}
else
irc_server_sendf (server, 0, NULL, "CAP END");
}
return WEECHAT_RC_OK;
}
/*
* Callback called for each manual join of a server: deletes old channels in the
* hashtable.
*/
void
irc_server_check_join_manual_cb (void *data,
struct t_hashtable *hashtable,
const void *key, const void *value)
{
/* make C compiler happy */
(void) data;
if (*((time_t *)value) + (60 * 10) < time (NULL))
weechat_hashtable_remove (hashtable, key);
}
/*
* Callback called for each join without switch of a server: deletes old channel
* in the hashtable.
*/
void
irc_server_check_join_noswitch_cb (void *data,
struct t_hashtable *hashtable,
const void *key, const void *value)
{
/* make C compiler happy */
(void) data;
if (*((time_t *)value) + (60 * 10) < time (NULL))
weechat_hashtable_remove (hashtable, key);
}
/*
* Callback called for each smart filtered join of a channel: deletes old
* entries in the hashtable.
*/
void
irc_server_check_join_smart_filtered_cb (void *data,
struct t_hashtable *hashtable,
const void *key, const void *value)
{
int unmask_delay;
/* make C compiler happy */
(void) data;
unmask_delay = weechat_config_integer (irc_config_look_smart_filter_join_unmask);
if ((unmask_delay == 0)
|| (*((time_t *)value) < time (NULL) - (unmask_delay * 60)))
{
weechat_hashtable_remove (hashtable, key);
}
}
/*
* Timer called each second to perform some operations on servers.
*/
int
irc_server_timer_cb (const void *pointer, void *data, int remaining_calls)
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
struct t_irc_redirect *ptr_redirect, *ptr_next_redirect;
time_t current_time;
static struct timeval tv;
int away_check, refresh_lag;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) remaining_calls;
current_time = time (NULL);
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
/* check if reconnection is pending */
if ((!ptr_server->is_connected)
&& (ptr_server->reconnect_start > 0)
&& (current_time >= (ptr_server->reconnect_start + ptr_server->reconnect_delay)))
{
irc_server_reconnect (ptr_server);
}
else
{
if (!ptr_server->is_connected)
continue;
/* send queued messages */
irc_server_outqueue_send (ptr_server);
/* check for lag */
if ((weechat_config_integer (irc_config_network_lag_check) > 0)
&& (ptr_server->lag_check_time.tv_sec == 0)
&& (current_time >= ptr_server->lag_next_check))
{
irc_server_sendf (ptr_server, 0, NULL, "PING %s",
(ptr_server->current_address) ?
ptr_server->current_address : "weechat");
gettimeofday (&(ptr_server->lag_check_time), NULL);
ptr_server->lag = 0;
ptr_server->lag_last_refresh = 0;
}
else
{
/* check away (only if lag check was not done) */
away_check = IRC_SERVER_OPTION_INTEGER(
ptr_server, IRC_SERVER_OPTION_AWAY_CHECK);
if (!weechat_hashtable_has_key (ptr_server->cap_list,
"away-notify")
&& (away_check > 0)
&& ((ptr_server->last_away_check == 0)
|| (current_time >= ptr_server->last_away_check + (away_check * 60))))
{
irc_server_check_away (ptr_server);
}
}
/* check if it's time to autojoin channels (after command delay) */
if ((ptr_server->command_time != 0)
&& (current_time >= ptr_server->command_time +
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_COMMAND_DELAY)))
{
irc_server_autojoin_channels (ptr_server);
ptr_server->command_time = 0;
}
/* check if it's time to send MONITOR command */
if ((ptr_server->monitor_time != 0)
&& (current_time >= ptr_server->monitor_time))
{
if (ptr_server->monitor > 0)
irc_notify_send_monitor (ptr_server);
ptr_server->monitor_time = 0;
}
/* compute lag */
if (ptr_server->lag_check_time.tv_sec != 0)
{
refresh_lag = 0;
gettimeofday (&tv, NULL);
ptr_server->lag = (int)(weechat_util_timeval_diff (&(ptr_server->lag_check_time),
&tv) / 1000);
/* refresh lag item if needed */
if (((ptr_server->lag_last_refresh == 0)
|| (current_time >= ptr_server->lag_last_refresh + weechat_config_integer (irc_config_network_lag_refresh_interval)))
&& (ptr_server->lag >= weechat_config_integer (irc_config_network_lag_min_show)))
{
ptr_server->lag_last_refresh = current_time;
if (ptr_server->lag != ptr_server->lag_displayed)
{
ptr_server->lag_displayed = ptr_server->lag;
refresh_lag = 1;
}
}
/* lag timeout? => disconnect */
if ((weechat_config_integer (irc_config_network_lag_reconnect) > 0)
&& (ptr_server->lag >= weechat_config_integer (irc_config_network_lag_reconnect) * 1000))
{
weechat_printf (
ptr_server->buffer,
_("%s%s: lag is high, reconnecting to server %s%s%s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
IRC_COLOR_CHAT_SERVER,
ptr_server->name,
IRC_COLOR_RESET);
irc_server_disconnect (ptr_server, 0, 1);
}
else
{
/* stop lag counting if max lag is reached */
if ((weechat_config_integer (irc_config_network_lag_max) > 0)
&& (ptr_server->lag >= (weechat_config_integer (irc_config_network_lag_max) * 1000)))
{
/* refresh lag item */
ptr_server->lag_last_refresh = current_time;
if (ptr_server->lag != ptr_server->lag_displayed)
{
ptr_server->lag_displayed = ptr_server->lag;
refresh_lag = 1;
}
/* schedule next lag check in 5 seconds */
ptr_server->lag_check_time.tv_sec = 0;
ptr_server->lag_check_time.tv_usec = 0;
ptr_server->lag_next_check = time (NULL) +
weechat_config_integer (irc_config_network_lag_check);
}
}
if (refresh_lag)
irc_server_set_lag (ptr_server);
}
/* remove redirects if timeout occurs */
ptr_redirect = ptr_server->redirects;
while (ptr_redirect)
{
ptr_next_redirect = ptr_redirect->next_redirect;
if ((ptr_redirect->start_time > 0)
&& (ptr_redirect->start_time + ptr_redirect->timeout < current_time))
{
irc_redirect_stop (ptr_redirect, "timeout");
}
ptr_redirect = ptr_next_redirect;
}
/* purge some data (every 10 minutes) */
if (current_time > ptr_server->last_data_purge + (60 * 10))
{
weechat_hashtable_map (ptr_server->join_manual,
&irc_server_check_join_manual_cb,
NULL);
weechat_hashtable_map (ptr_server->join_noswitch,
&irc_server_check_join_noswitch_cb,
NULL);
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->join_smart_filtered)
{
weechat_hashtable_map (ptr_channel->join_smart_filtered,
&irc_server_check_join_smart_filtered_cb,
NULL);
}
}
ptr_server->last_data_purge = current_time;
}
}
}
return WEECHAT_RC_OK;
}
/*
* Closes server connection.
*/
void
irc_server_close_connection (struct t_irc_server *server)
{
int i;
if (server->hook_timer_connection)
{
weechat_unhook (server->hook_timer_connection);
server->hook_timer_connection = NULL;
}
if (server->hook_timer_sasl)
{
weechat_unhook (server->hook_timer_sasl);
server->hook_timer_sasl = NULL;
}
if (server->hook_fd)
{
weechat_unhook (server->hook_fd);
server->hook_fd = NULL;
}
if (server->hook_connect)
{
weechat_unhook (server->hook_connect);
server->hook_connect = NULL;
}
else
{
#ifdef HAVE_GNUTLS
/* close SSL connection */
if (server->ssl_connected)
{
if (server->sock != -1)
gnutls_bye (server->gnutls_sess, GNUTLS_SHUT_WR);
gnutls_deinit (server->gnutls_sess);
}
#endif /* HAVE_GNUTLS */
}
if (server->sock != -1)
{
#ifdef _WIN32
closesocket (server->sock);
#else
close (server->sock);
#endif /* _WIN32 */
server->sock = -1;
}
/* free any pending message */
if (server->unterminated_message)
{
free (server->unterminated_message);
server->unterminated_message = NULL;
}
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
irc_server_outqueue_free_all (server, i);
}
/* remove all redirects */
irc_redirect_free_all (server);
/* remove all manual joins */
weechat_hashtable_remove_all (server->join_manual);
/* remove all keys for pending joins */
weechat_hashtable_remove_all (server->join_channel_key);
/* remove all keys for joins without switch */
weechat_hashtable_remove_all (server->join_noswitch);
/* server is now disconnected */
server->is_connected = 0;
server->ssl_connected = 0;
}
/*
* Schedules reconnection on server.
*/
void
irc_server_reconnect_schedule (struct t_irc_server *server)
{
int minutes, seconds;
if (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTORECONNECT))
{
/* growing reconnect delay */
if (server->reconnect_delay == 0)
server->reconnect_delay = IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AUTORECONNECT_DELAY);
else
server->reconnect_delay = server->reconnect_delay * weechat_config_integer (irc_config_network_autoreconnect_delay_growing);
if ((weechat_config_integer (irc_config_network_autoreconnect_delay_max) > 0)
&& (server->reconnect_delay > weechat_config_integer (irc_config_network_autoreconnect_delay_max)))
server->reconnect_delay = weechat_config_integer (irc_config_network_autoreconnect_delay_max);
server->reconnect_start = time (NULL);
minutes = server->reconnect_delay / 60;
seconds = server->reconnect_delay % 60;
if ((minutes > 0) && (seconds > 0))
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server in %d %s, %d %s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
minutes,
NG_("minute", "minutes", minutes),
seconds,
NG_("second", "seconds", seconds));
}
else if (minutes > 0)
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server in %d %s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
minutes,
NG_("minute", "minutes", minutes));
}
else
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server in %d %s"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
seconds,
NG_("second", "seconds", seconds));
}
}
else
{
server->reconnect_delay = 0;
server->reconnect_start = 0;
}
}
/*
* Logins to server.
*/
void
irc_server_login (struct t_irc_server *server)
{
const char *capabilities;
char *password, *username, *realname, *username2;
password = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PASSWORD));
username = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERNAME));
realname = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_REALNAME));
capabilities = IRC_SERVER_OPTION_STRING(
server, IRC_SERVER_OPTION_CAPABILITIES);
if (password && password[0])
{
irc_server_sendf (
server, 0, NULL,
"PASS %s%s",
((password[0] == ':') || (strchr (password, ' '))) ? ":" : "",
password);
}
if (!server->nick)
{
irc_server_set_nick (server,
(server->nicks_array) ?
server->nicks_array[0] : "weechat");
server->nick_first_tried = 0;
}
else
server->nick_first_tried = irc_server_get_nick_index (server);
server->nick_alternate_number = -1;
if (irc_server_sasl_enabled (server) || (capabilities && capabilities[0]))
{
irc_server_sendf (server, 0, NULL, "CAP LS " IRC_SERVER_VERSION_CAP);
}
username2 = (username && username[0]) ?
weechat_string_replace (username, " ", "_") : strdup ("weechat");
irc_server_sendf (
server, 0, NULL,
"NICK %s%s\n"
"USER %s 0 * :%s",
(server->nick && strchr (server->nick, ':')) ? ":" : "",
server->nick,
(username2) ? username2 : "weechat",
(realname && realname[0]) ? realname : ((username2) ? username2 : "weechat"));
if (username2)
free (username2);
if (server->hook_timer_connection)
weechat_unhook (server->hook_timer_connection);
server->hook_timer_connection = weechat_hook_timer (
IRC_SERVER_OPTION_INTEGER (server, IRC_SERVER_OPTION_CONNECTION_TIMEOUT) * 1000,
0, 1,
&irc_server_timer_connection_cb,
server, NULL);
if (password)
free (password);
if (username)
free (username);
if (realname)
free (realname);
}
/*
* Switches address and tries another (called if connection failed with an
* address/port).
*/
void
irc_server_switch_address (struct t_irc_server *server, int connection)
{
if (server->addresses_count > 1)
{
irc_server_set_index_current_address (
server,
(server->index_current_address + 1) % server->addresses_count);
weechat_printf (
server->buffer,
_("%s%s: switching address to %s/%d"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port);
if (connection)
{
if (server->index_current_address == 0)
irc_server_reconnect_schedule (server);
else
irc_server_connect (server);
}
}
else
{
if (connection)
irc_server_reconnect_schedule (server);
}
}
/*
* Reads connection status.
*/
int
irc_server_connect_cb (const void *pointer, void *data,
int status, int gnutls_rc, int sock,
const char *error, const char *ip_address)
{
struct t_irc_server *server;
const char *proxy;
/* make C compiler happy */
(void) data;
server = (struct t_irc_server *)pointer;
proxy = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PROXY);
server->hook_connect = NULL;
server->sock = sock;
switch (status)
{
case WEECHAT_HOOK_CONNECT_OK:
/* set IP */
if (server->current_ip)
free (server->current_ip);
server->current_ip = (ip_address) ? strdup (ip_address) : NULL;
weechat_printf (
server->buffer,
_("%s%s: connected to %s/%d (%s)"),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(server->current_ip) ? server->current_ip : "?");
server->hook_fd = weechat_hook_fd (server->sock,
1, 0, 0,
&irc_server_recv_cb,
server, NULL);
/* login to server */
irc_server_login (server);
break;
case WEECHAT_HOOK_CONNECT_ADDRESS_NOT_FOUND:
weechat_printf (
server->buffer,
(proxy && proxy[0]) ?
_("%s%s: proxy address \"%s\" not found") :
_("%s%s: address \"%s\" not found"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
server->current_address);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_IP_ADDRESS_NOT_FOUND:
weechat_printf (
server->buffer,
(proxy && proxy[0]) ?
_("%s%s: proxy IP address not found") :
_("%s%s: IP address not found"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_CONNECTION_REFUSED:
weechat_printf (
server->buffer,
(proxy && proxy[0]) ?
_("%s%s: proxy connection refused") :
_("%s%s: connection refused"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_PROXY_ERROR:
weechat_printf (
server->buffer,
_("%s%s: proxy fails to establish connection to server (check "
"username/password if used and if server address/port is "
"allowed by proxy)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_LOCAL_HOSTNAME_ERROR:
weechat_printf (
server->buffer,
_("%s%s: unable to set local hostname/IP"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_reconnect_schedule (server);
break;
case WEECHAT_HOOK_CONNECT_GNUTLS_INIT_ERROR:
weechat_printf (
server->buffer,
_("%s%s: TLS init error"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_reconnect_schedule (server);
break;
case WEECHAT_HOOK_CONNECT_GNUTLS_HANDSHAKE_ERROR:
weechat_printf (
server->buffer,
_("%s%s: TLS handshake failed"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
#ifdef HAVE_GNUTLS
if (gnutls_rc == GNUTLS_E_DH_PRIME_UNACCEPTABLE)
{
weechat_printf (
server->buffer,
_("%s%s: you should play with option "
"irc.server.%s.ssl_dhkey_size (current value is %d, try "
"a lower value like %d or %d)"),
weechat_prefix ("error"),
IRC_PLUGIN_NAME,
server->name,
IRC_SERVER_OPTION_INTEGER (
server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE),
IRC_SERVER_OPTION_INTEGER (
server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE) / 2,
IRC_SERVER_OPTION_INTEGER (
server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE) / 4);
}
#else
(void) gnutls_rc;
#endif /* HAVE_GNUTLS */
irc_server_close_connection (server);
server->current_retry++;
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_MEMORY_ERROR:
weechat_printf (
server->buffer,
_("%s%s: not enough memory (%s)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
(error) ? error : "-");
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
irc_server_reconnect_schedule (server);
break;
case WEECHAT_HOOK_CONNECT_TIMEOUT:
weechat_printf (
server->buffer,
_("%s%s: timeout"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_switch_address (server, 1);
break;
case WEECHAT_HOOK_CONNECT_SOCKET_ERROR:
weechat_printf (
server->buffer,
_("%s%s: unable to create socket"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
if (error && error[0])
{
weechat_printf (
server->buffer,
_("%s%s: error: %s"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, error);
}
irc_server_close_connection (server);
server->current_retry++;
irc_server_reconnect_schedule (server);
break;
}
return WEECHAT_RC_OK;
}
/*
* Sets the title for a server buffer.
*/
void
irc_server_set_buffer_title (struct t_irc_server *server)
{
char *title;
int length;
if (server && server->buffer)
{
if (server->is_connected)
{
length = 16 +
((server->current_address) ? strlen (server->current_address) : 16) +
16 + ((server->current_ip) ? strlen (server->current_ip) : 16) + 1;
title = malloc (length);
if (title)
{
snprintf (title, length, "IRC: %s/%d (%s)",
server->current_address,
server->current_port,
(server->current_ip) ? server->current_ip : "");
weechat_buffer_set (server->buffer, "title", title);
free (title);
}
}
else
{
weechat_buffer_set (server->buffer, "title", "");
}
}
}
/*
* Creates a buffer for a server.
*
* Returns pointer to buffer, NULL if error.
*/
struct t_gui_buffer *
irc_server_create_buffer (struct t_irc_server *server)
{
char buffer_name[256], charset_modifier[256];
struct t_gui_buffer *ptr_buffer_for_merge;
ptr_buffer_for_merge = NULL;
switch (weechat_config_integer (irc_config_look_server_buffer))
{
case IRC_CONFIG_LOOK_SERVER_BUFFER_MERGE_WITH_CORE:
/* merge with WeeChat core buffer */
ptr_buffer_for_merge = weechat_buffer_search_main ();
break;
case IRC_CONFIG_LOOK_SERVER_BUFFER_MERGE_WITHOUT_CORE:
/* find buffer used to merge all IRC server buffers */
ptr_buffer_for_merge = irc_buffer_search_server_lowest_number ();
break;
}
snprintf (buffer_name, sizeof (buffer_name),
"server.%s", server->name);
server->buffer = weechat_buffer_new (buffer_name,
&irc_input_data_cb, NULL, NULL,
&irc_buffer_close_cb, NULL, NULL);
if (!server->buffer)
return NULL;
if (!weechat_buffer_get_integer (server->buffer, "short_name_is_set"))
weechat_buffer_set (server->buffer, "short_name", server->name);
weechat_buffer_set (server->buffer, "localvar_set_type", "server");
weechat_buffer_set (server->buffer, "localvar_set_server", server->name);
weechat_buffer_set (server->buffer, "localvar_set_channel", server->name);
snprintf (charset_modifier, sizeof (charset_modifier),
"irc.%s", server->name);
weechat_buffer_set (server->buffer, "localvar_set_charset_modifier",
charset_modifier);
(void) weechat_hook_signal_send ("logger_backlog",
WEECHAT_HOOK_SIGNAL_POINTER,
server->buffer);
if (weechat_config_boolean (irc_config_network_send_unknown_commands))
weechat_buffer_set (server->buffer, "input_get_unknown_commands", "1");
/* set highlights settings on server buffer */
weechat_buffer_set (server->buffer, "highlight_words_add",
weechat_config_string (irc_config_look_highlight_server));
if (weechat_config_string (irc_config_look_highlight_tags_restrict)
&& weechat_config_string (irc_config_look_highlight_tags_restrict)[0])
{
weechat_buffer_set (
server->buffer, "highlight_tags_restrict",
weechat_config_string (irc_config_look_highlight_tags_restrict));
}
irc_server_set_buffer_title (server);
/*
* merge buffer if needed: if merge with(out) core set, and if no layout
* number is assigned for this buffer (if layout number is assigned, then
* buffer was already moved/merged by WeeChat core)
*/
if (ptr_buffer_for_merge
&& (weechat_buffer_get_integer (server->buffer, "layout_number") < 1))
{
weechat_buffer_merge (server->buffer, ptr_buffer_for_merge);
}
(void) weechat_hook_signal_send ("irc_server_opened",
WEECHAT_HOOK_SIGNAL_POINTER,
server->buffer);
return server->buffer;
}
/*
* Searches for a fingerprint digest algorithm with the size (in bits).
*
* Returns index of algo in enum t_irc_fingerprint_digest_algo,
* -1 if not found.
*/
#ifdef HAVE_GNUTLS
int
irc_server_fingerprint_search_algo_with_size (int size)
{
int i;
for (i = 0; i < IRC_FINGERPRINT_NUM_ALGOS; i++)
{
if (irc_fingerprint_digest_algos_size[i] == size)
return i;
}
/* digest algorithm not found */
return -1;
}
#endif /* HAVE_GNUTLS */
/*
* Returns a string with sizes of allowed fingerprint,
* in number of hexadecimal digits (== bits / 4).
*
* Example of output: "128=SHA-512, 64=SHA-256, 40=SHA-1".
*
* Note: result must be freed after use.
*/
#ifdef HAVE_GNUTLS
char *
irc_server_fingerprint_str_sizes ()
{
char str_sizes[1024], str_one_size[128];
int i;
str_sizes[0] = '\0';
for (i = IRC_FINGERPRINT_NUM_ALGOS - 1; i >= 0; i--)
{
snprintf (str_one_size, sizeof (str_one_size),
"%d=%s%s",
irc_fingerprint_digest_algos_size[i] / 4,
irc_fingerprint_digest_algos_name[i],
(i > 0) ? ", " : "");
strcat (str_sizes, str_one_size);
}
return strdup (str_sizes);
}
#endif /* HAVE_GNUTLS */
/*
* Compares two fingerprints: one hexadecimal (given by user), the second binary
* (received from IRC server).
*
* Returns:
* 0: fingerprints are the same
* -1: fingerprints are different
*/
#ifdef HAVE_GNUTLS
int
irc_server_compare_fingerprints (const char *fingerprint,
const unsigned char *fingerprint_server,
ssize_t fingerprint_size)
{
ssize_t i;
unsigned int value;
if ((ssize_t)strlen (fingerprint) != fingerprint_size * 2)
return -1;
for (i = 0; i < fingerprint_size; i++)
{
if (sscanf (&fingerprint[i * 2], "%02x", &value) != 1)
return -1;
if (value != fingerprint_server[i])
return -1;
}
/* fingerprints are the same */
return 0;
}
#endif /* HAVE_GNUTLS */
/*
* Checks if a GnuTLS session uses the certificate with a given fingerprint.
*
* Returns:
* 1: certificate has the good fingerprint
* 0: certificate does NOT have the good fingerprint
*/
#ifdef HAVE_GNUTLS
int
irc_server_check_certificate_fingerprint (struct t_irc_server *server,
gnutls_x509_crt_t certificate,
const char *good_fingerprints)
{
unsigned char *fingerprint_server[IRC_FINGERPRINT_NUM_ALGOS];
char **fingerprints;
int i, rc, algo;
size_t size_bits, size_bytes;
for (i = 0; i < IRC_FINGERPRINT_NUM_ALGOS; i++)
{
fingerprint_server[i] = NULL;
}
/* split good_fingerprints */
fingerprints = weechat_string_split (good_fingerprints, ",", NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0, NULL);
if (!fingerprints)
return 0;
rc = 0;
for (i = 0; fingerprints[i]; i++)
{
size_bits = strlen (fingerprints[i]) * 4;
size_bytes = size_bits / 8;
algo = irc_server_fingerprint_search_algo_with_size (size_bits);
if (algo < 0)
continue;
if (!fingerprint_server[algo])
{
fingerprint_server[algo] = malloc (size_bytes);
if (fingerprint_server[algo])
{
/* calculate the fingerprint for the certificate */
if (gnutls_x509_crt_get_fingerprint (
certificate,
irc_fingerprint_digest_algos[algo],
fingerprint_server[algo],
&size_bytes) != GNUTLS_E_SUCCESS)
{
weechat_printf (
server->buffer,
_("%sgnutls: failed to calculate certificate "
"fingerprint (%s)"),
weechat_prefix ("error"),
irc_fingerprint_digest_algos_name[algo]);
free (fingerprint_server[algo]);
fingerprint_server[algo] = NULL;
}
}
else
{
weechat_printf (
server->buffer,
_("%s%s: not enough memory (%s)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
"fingerprint");
}
}
if (fingerprint_server[algo])
{
/* check if the fingerprint matches */
if (irc_server_compare_fingerprints (fingerprints[i],
fingerprint_server[algo],
size_bytes) == 0)
{
rc = 1;
break;
}
}
}
weechat_string_free_split (fingerprints);
for (i = 0; i < IRC_FINGERPRINT_NUM_ALGOS; i++)
{
if (fingerprint_server[i])
free (fingerprint_server[i]);
}
return rc;
}
#endif /* HAVE_GNUTLS */
/*
* GnuTLS callback called during handshake.
*
* Returns:
* 0: certificate OK
* -1: error in certificate
*/
#ifdef HAVE_GNUTLS
int
irc_server_gnutls_callback (const void *pointer, void *data,
gnutls_session_t tls_session,
const gnutls_datum_t *req_ca, int nreq,
const gnutls_pk_algorithm_t *pk_algos,
int pk_algos_len,
#if LIBGNUTLS_VERSION_NUMBER >= 0x020b00 /* 2.11.0 */
gnutls_retr2_st *answer,
#else
gnutls_retr_st *answer,
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x020b00 */
int action)
{
struct t_irc_server *server;
#if LIBGNUTLS_VERSION_NUMBER >= 0x020b00 /* 2.11.0 */
gnutls_retr2_st tls_struct;
#else
gnutls_retr_st tls_struct;
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x020b00 */
gnutls_x509_crt_t cert_temp;
const gnutls_datum_t *cert_list;
gnutls_datum_t filedatum;
unsigned int i, cert_list_len, status;
time_t cert_time;
char *cert_path0, *cert_path1, *cert_path2, *cert_str, *fingerprint_eval;
char *weechat_dir, *ssl_password;
const char *ptr_fingerprint;
int rc, ret, fingerprint_match, hostname_match, cert_temp_init;
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* 1.7.6 */
gnutls_datum_t cinfo;
int rinfo;
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x010706 */
/* make C compiler happy */
(void) data;
(void) req_ca;
(void) nreq;
(void) pk_algos;
(void) pk_algos_len;
rc = 0;
if (!pointer)
return -1;
server = (struct t_irc_server *) pointer;
cert_temp_init = 0;
cert_list = NULL;
cert_list_len = 0;
fingerprint_eval = NULL;
weechat_dir = NULL;
if (action == WEECHAT_HOOK_CONNECT_GNUTLS_CB_VERIFY_CERT)
{
weechat_printf (
server->buffer,
_("%sgnutls: connected using %d-bit Diffie-Hellman shared secret "
"exchange"),
weechat_prefix ("network"),
IRC_SERVER_OPTION_INTEGER (server,
IRC_SERVER_OPTION_SSL_DHKEY_SIZE));
/* initialize the certificate structure */
if (gnutls_x509_crt_init (&cert_temp) != GNUTLS_E_SUCCESS)
{
weechat_printf (
server->buffer,
_("%sgnutls: failed to initialize certificate structure"),
weechat_prefix ("error"));
rc = -1;
goto end;
}
/* flag to do the "deinit" (at the end of function) */
cert_temp_init = 1;
/* get fingerprint option in server */
ptr_fingerprint = IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_FINGERPRINT);
fingerprint_eval = irc_server_eval_fingerprint (server);
if (!fingerprint_eval)
{
rc = -1;
goto end;
}
/* set match options */
fingerprint_match = (ptr_fingerprint && ptr_fingerprint[0]) ? 0 : 1;
hostname_match = 0;
/* get the peer's raw certificate (chain) as sent by the peer */
cert_list = gnutls_certificate_get_peers (tls_session, &cert_list_len);
if (cert_list)
{
weechat_printf (
server->buffer,
NG_("%sgnutls: receiving %d certificate",
"%sgnutls: receiving %d certificates",
cert_list_len),
weechat_prefix ("network"),
cert_list_len);
for (i = 0; i < cert_list_len; i++)
{
if (gnutls_x509_crt_import (cert_temp,
&cert_list[i],
GNUTLS_X509_FMT_DER) != GNUTLS_E_SUCCESS)
{
weechat_printf (
server->buffer,
_("%sgnutls: failed to import certificate[%d]"),
weechat_prefix ("error"), i + 1);
rc = -1;
goto end;
}
/* checks on first certificate received */
if (i == 0)
{
/* check if fingerprint matches the first certificate */
if (fingerprint_eval && fingerprint_eval[0])
{
fingerprint_match = irc_server_check_certificate_fingerprint (
server, cert_temp, fingerprint_eval);
}
/* check if hostname matches in the first certificate */
if (gnutls_x509_crt_check_hostname (cert_temp,
server->current_address) != 0)
{
hostname_match = 1;
}
}
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* 1.7.6 */
/* display infos about certificate */
#if LIBGNUTLS_VERSION_NUMBER < 0x020400 /* 2.4.0 */
rinfo = gnutls_x509_crt_print (cert_temp,
GNUTLS_X509_CRT_ONELINE, &cinfo);
#else
rinfo = gnutls_x509_crt_print (cert_temp,
GNUTLS_CRT_PRINT_ONELINE, &cinfo);
#endif /* LIBGNUTLS_VERSION_NUMBER < 0x020400 */
if (rinfo == 0)
{
weechat_printf (
server->buffer,
_("%s - certificate[%d] info:"),
weechat_prefix ("network"), i + 1);
weechat_printf (
server->buffer,
"%s - %s",
weechat_prefix ("network"), cinfo.data);
gnutls_free (cinfo.data);
}
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x010706 */
/* check dates, only if fingerprint is not set */
if (!ptr_fingerprint || !ptr_fingerprint[0])
{
/* check expiration date */
cert_time = gnutls_x509_crt_get_expiration_time (cert_temp);
if (cert_time < time (NULL))
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate has expired"),
weechat_prefix ("error"));
rc = -1;
}
/* check activation date */
cert_time = gnutls_x509_crt_get_activation_time (cert_temp);
if (cert_time > time (NULL))
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate is not yet activated"),
weechat_prefix ("error"));
rc = -1;
}
}
}
/*
* if fingerprint is set, display if matches, and don't check
* anything else
*/
if (ptr_fingerprint && ptr_fingerprint[0])
{
if (fingerprint_match)
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate fingerprint matches"),
weechat_prefix ("network"));
}
else
{
weechat_printf (
server->buffer,
_("%sgnutls: certificate fingerprint does NOT match "
"(check value of option "
"irc.server.%s.ssl_fingerprint)"),
weechat_prefix ("error"), server->name);
rc = -1;
}
goto end;
}
if (!hostname_match)
{
weechat_printf (
server->buffer,
_("%sgnutls: the hostname in the certificate does NOT "
"match \"%s\""),
weechat_prefix ("error"), server->current_address);
rc = -1;
}
}
/* verify the peer’s certificate */
if (gnutls_certificate_verify_peers2 (tls_session, &status) < 0)
{
weechat_printf (
server->buffer,
_("%sgnutls: error while checking peer's certificate"),
weechat_prefix ("error"));
rc = -1;
goto end;
}
/* check if certificate is trusted */
if (status & GNUTLS_CERT_INVALID)
{
weechat_printf (
server->buffer,
_("%sgnutls: peer's certificate is NOT trusted"),
weechat_prefix ("error"));
rc = -1;
}
else
{
weechat_printf (
server->buffer,
_("%sgnutls: peer's certificate is trusted"),
weechat_prefix ("network"));
}
/* check if certificate issuer is known */
if (status & GNUTLS_CERT_SIGNER_NOT_FOUND)
{
weechat_printf (
server->buffer,
_("%sgnutls: peer's certificate issuer is unknown"),
weechat_prefix ("error"));
rc = -1;
}
/* check that certificate is not revoked */
if (status & GNUTLS_CERT_REVOKED)
{
weechat_printf (
server->buffer,
_("%sgnutls: the certificate has been revoked"),
weechat_prefix ("error"));
rc = -1;
}
}
else if (action == WEECHAT_HOOK_CONNECT_GNUTLS_CB_SET_CERT)
{
/* using client certificate if it exists */
cert_path0 = (char *) IRC_SERVER_OPTION_STRING(
server, IRC_SERVER_OPTION_SSL_CERT);
if (cert_path0 && cert_path0[0])
{
weechat_dir = weechat_info_get ("weechat_dir", "");
cert_path1 = weechat_string_replace (cert_path0, "%h", weechat_dir);
cert_path2 = (cert_path1) ?
weechat_string_expand_home (cert_path1) : NULL;
if (cert_path2)
{
cert_str = weechat_file_get_content (cert_path2);
if (cert_str)
{
weechat_printf (
server->buffer,
_("%sgnutls: sending one certificate"),
weechat_prefix ("network"));
filedatum.data = (unsigned char *) cert_str;
filedatum.size = strlen (cert_str);
/* certificate */
gnutls_x509_crt_init (&server->tls_cert);
gnutls_x509_crt_import (server->tls_cert, &filedatum,
GNUTLS_X509_FMT_PEM);
/* key password */
ssl_password = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_PASSWORD));
/* key */
gnutls_x509_privkey_init (&server->tls_cert_key);
/*
* gnutls_x509_privkey_import2 has no "Since: ..." in GnuTLS manual but
* GnuTLS NEWS file lists it being added in 3.1.0:
* https://gitlab.com/gnutls/gnutls/blob/2b715b9564681acb3008a5574dcf25464de8b038/NEWS#L2552
*/
#if LIBGNUTLS_VERSION_NUMBER >= 0x030100 /* 3.1.0 */
ret = gnutls_x509_privkey_import2 (server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM,
ssl_password,
0);
#else
ret = gnutls_x509_privkey_import (server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM);
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x0301000 */
if (ret < 0)
{
ret = gnutls_x509_privkey_import_pkcs8 (
server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM,
ssl_password,
GNUTLS_PKCS_PLAIN);
}
if (ret < 0)
{
weechat_printf (
server->buffer,
_("%sgnutls: invalid certificate \"%s\", error: "
"%s"),
weechat_prefix ("error"), cert_path2,
gnutls_strerror (ret));
rc = -1;
}
else
{
#if LIBGNUTLS_VERSION_NUMBER >= 0x020b00 /* 2.11.0 */
tls_struct.cert_type = GNUTLS_CRT_X509;
tls_struct.key_type = GNUTLS_PRIVKEY_X509;
#else
tls_struct.type = GNUTLS_CRT_X509;
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x020b00 */
tls_struct.ncerts = 1;
tls_struct.deinit_all = 0;
tls_struct.cert.x509 = &server->tls_cert;
tls_struct.key.x509 = server->tls_cert_key;
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* 1.7.6 */
/* client certificate info */
#if LIBGNUTLS_VERSION_NUMBER < 0x020400 /* 2.4.0 */
rinfo = gnutls_x509_crt_print (server->tls_cert,
GNUTLS_X509_CRT_ONELINE,
&cinfo);
#else
rinfo = gnutls_x509_crt_print (server->tls_cert,
GNUTLS_CRT_PRINT_ONELINE,
&cinfo);
#endif /* LIBGNUTLS_VERSION_NUMBER < 0x020400 */
if (rinfo == 0)
{
weechat_printf (
server->buffer,
_("%s - client certificate info (%s):"),
weechat_prefix ("network"), cert_path2);
weechat_printf (
server->buffer, "%s - %s",
weechat_prefix ("network"), cinfo.data);
gnutls_free (cinfo.data);
}
#endif /* LIBGNUTLS_VERSION_NUMBER >= 0x010706 */
memcpy (answer, &tls_struct, sizeof (tls_struct));
free (cert_str);
}
if (ssl_password)
free (ssl_password);
}
else
{
weechat_printf (
server->buffer,
_("%sgnutls: unable to read certificate \"%s\""),
weechat_prefix ("error"), cert_path2);
}
}
if (cert_path1)
free (cert_path1);
if (cert_path2)
free (cert_path2);
}
}
end:
/* an error should stop the handshake unless the user doesn't care */
if ((rc == -1)
&& (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL_VERIFY) == 0))
{
rc = 0;
}
if (cert_temp_init)
gnutls_x509_crt_deinit (cert_temp);
if (weechat_dir)
free (weechat_dir);
if (fingerprint_eval)
free (fingerprint_eval);
return rc;
}
#endif /* HAVE_GNUTLS */
/*
* Connects to a server.
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_connect (struct t_irc_server *server)
{
int length;
char *option_name;
struct t_config_option *proxy_type, *proxy_ipv6, *proxy_address;
struct t_config_option *proxy_port;
const char *proxy, *str_proxy_type, *str_proxy_address;
server->disconnected = 0;
if (!server->buffer)
{
if (!irc_server_create_buffer (server))
return 0;
weechat_buffer_set (server->buffer, "display", "auto");
}
irc_bar_item_update_channel ();
irc_server_set_index_current_address (server,
server->index_current_address);
if (!server->current_address)
{
weechat_printf (
server->buffer,
_("%s%s: unknown address for server \"%s\", cannot connect"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, server->name);
return 0;
}
/* free some old values (from a previous connection to server) */
if (server->isupport)
{
free (server->isupport);
server->isupport = NULL;
}
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
proxy_type = NULL;
proxy_ipv6 = NULL;
proxy_address = NULL;
proxy_port = NULL;
str_proxy_type = NULL;
str_proxy_address = NULL;
proxy = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PROXY);
if (proxy && proxy[0])
{
length = 32 + strlen (proxy) + 1;
option_name = malloc (length);
if (!option_name)
{
weechat_printf (
server->buffer,
_("%s%s: not enough memory (%s)"),
weechat_prefix ("error"), IRC_PLUGIN_NAME,
"proxy");
return 0;
}
snprintf (option_name, length, "weechat.proxy.%s.type", proxy);
proxy_type = weechat_config_get (option_name);
snprintf (option_name, length, "weechat.proxy.%s.ipv6", proxy);
proxy_ipv6 = weechat_config_get (option_name);
snprintf (option_name, length, "weechat.proxy.%s.address", proxy);
proxy_address = weechat_config_get (option_name);
snprintf (option_name, length, "weechat.proxy.%s.port", proxy);
proxy_port = weechat_config_get (option_name);
free (option_name);
if (!proxy_type || !proxy_address)
{
weechat_printf (
server->buffer,
_("%s%s: proxy \"%s\" not found for server \"%s\", cannot "
"connect"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, proxy, server->name);
return 0;
}
str_proxy_type = weechat_config_string (proxy_type);
str_proxy_address = weechat_config_string (proxy_address);
if (!str_proxy_type[0] || !proxy_ipv6 || !str_proxy_address[0]
|| !proxy_port)
{
weechat_printf (
server->buffer,
_("%s%s: missing proxy settings, check options for proxy "
"\"%s\""),
weechat_prefix ("error"), IRC_PLUGIN_NAME, proxy);
return 0;
}
}
if (!server->nicks_array)
{
weechat_printf (
server->buffer,
_("%s%s: nicks not defined for server \"%s\", cannot connect"),
weechat_prefix ("error"), IRC_PLUGIN_NAME, server->name);
return 0;
}
#ifndef HAVE_GNUTLS
if (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL))
{
weechat_printf (
server->buffer,
_("%s%s: cannot connect with SSL because WeeChat was not built "
"with GnuTLS support"),
weechat_prefix ("error"), IRC_PLUGIN_NAME);
return 0;
}
#endif /* HAVE_GNUTLS */
if (proxy_type)
{
weechat_printf (
server->buffer,
_("%s%s: connecting to server %s/%d%s via %s proxy %s/%d%s..."),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "",
str_proxy_type,
str_proxy_address,
weechat_config_integer (proxy_port),
(weechat_config_boolean (proxy_ipv6)) ? " (IPv6)" : "");
weechat_log_printf (
_("Connecting to server %s/%d%s via %s proxy %s/%d%s..."),
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "",
str_proxy_type,
str_proxy_address,
weechat_config_integer (proxy_port),
(weechat_config_boolean (proxy_ipv6)) ? " (IPv6)" : "");
}
else
{
weechat_printf (
server->buffer,
_("%s%s: connecting to server %s/%d%s..."),
weechat_prefix ("network"),
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "");
weechat_log_printf (
_("%s%s: connecting to server %s/%d%s..."),
"",
IRC_PLUGIN_NAME,
server->current_address,
server->current_port,
(IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)) ?
" (SSL)" : "");
}
/* close connection if opened */
irc_server_close_connection (server);
/* open auto-joined channels now (if needed) */
if (weechat_config_boolean (irc_config_look_buffer_open_before_autojoin)
&& !server->disable_autojoin)
{
irc_server_autojoin_create_buffers (server);
}
/* init SSL if asked and connect */
server->ssl_connected = 0;
#ifdef HAVE_GNUTLS
if (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL))
server->ssl_connected = 1;
server->hook_connect = weechat_hook_connect (
proxy,
server->current_address,
server->current_port,
proxy_type ? weechat_config_integer (proxy_ipv6) : IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_IPV6),
server->current_retry,
(server->ssl_connected) ? &server->gnutls_sess : NULL,
(server->ssl_connected) ? &irc_server_gnutls_callback : NULL,
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE),
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_PRIORITIES),
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_LOCAL_HOSTNAME),
&irc_server_connect_cb,
server,
NULL);
#else
server->hook_connect = weechat_hook_connect (
proxy,
server->current_address,
server->current_port,
proxy_type ? weechat_config_integer (proxy_ipv6) : IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_IPV6),
server->current_retry,
NULL, NULL, 0, NULL,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_LOCAL_HOSTNAME),
&irc_server_connect_cb,
server,
NULL);
#endif /* HAVE_GNUTLS */
/* send signal "irc_server_connecting" with server name */
(void) weechat_hook_signal_send ("irc_server_connecting",
WEECHAT_HOOK_SIGNAL_STRING, server->name);
return 1;
}
/*
* Reconnects to a server (after disconnection).
*/
void
irc_server_reconnect (struct t_irc_server *server)
{
weechat_printf (
server->buffer,
_("%s%s: reconnecting to server..."),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
server->reconnect_start = 0;
if (irc_server_connect (server))
server->reconnect_join = 1;
else
irc_server_reconnect_schedule (server);
}
/*
* Callback for auto-connect to servers (called at startup).
*/
int
irc_server_auto_connect_timer_cb (const void *pointer, void *data,
int remaining_calls)
{
struct t_irc_server *ptr_server;
int auto_connect;
/* make C compiler happy */
(void) data;
(void) remaining_calls;
auto_connect = (pointer) ? 1 : 0;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if ((auto_connect || ptr_server->temp_server)
&& (IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTOCONNECT)))
{
if (!irc_server_connect (ptr_server))
irc_server_reconnect_schedule (ptr_server);
}
}
return WEECHAT_RC_OK;
}
/*
* Auto-connects to servers (called at startup).
*
* If auto_connect == 1, auto-connects to all servers with flag "autoconnect".
* If auto_connect == 0, auto-connect to temporary servers only.
*/
void
irc_server_auto_connect (int auto_connect)
{
weechat_hook_timer (1, 0, 1,
&irc_server_auto_connect_timer_cb,
(auto_connect) ? (void *)1 : (void *)0,
NULL);
}
/*
* Disconnects from a server.
*/
void
irc_server_disconnect (struct t_irc_server *server, int switch_address,
int reconnect)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
/*
* remove all nicks and write disconnection message on each
* channel/private buffer
*/
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
irc_nick_free_all (server, ptr_channel);
if (ptr_channel->hook_autorejoin)
{
weechat_unhook (ptr_channel->hook_autorejoin);
ptr_channel->hook_autorejoin = NULL;
}
weechat_buffer_set (ptr_channel->buffer, "localvar_del_away", "");
weechat_printf (
ptr_channel->buffer,
_("%s%s: disconnected from server"),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
}
/* remove away status on server buffer */
weechat_buffer_set (server->buffer, "localvar_del_away", "");
}
irc_server_close_connection (server);
if (server->buffer)
{
weechat_printf (
server->buffer,
_("%s%s: disconnected from server"),
weechat_prefix ("network"), IRC_PLUGIN_NAME);
}
server->current_retry = 0;
if (switch_address)
irc_server_switch_address (server, 0);
else
irc_server_set_index_current_address (server, 0);
if (server->nick_modes)
{
free (server->nick_modes);
server->nick_modes = NULL;
weechat_bar_item_update ("input_prompt");
weechat_bar_item_update ("irc_nick_modes");
}
if (server->host)
{
free (server->host);
server->host = NULL;
weechat_bar_item_update ("irc_host");
weechat_bar_item_update ("irc_nick_host");
}
server->checking_cap_ls = 0;
weechat_hashtable_remove_all (server->cap_ls);
server->checking_cap_list = 0;
weechat_hashtable_remove_all (server->cap_list);
server->is_away = 0;
server->away_time = 0;
server->lag = 0;
server->lag_displayed = -1;
server->lag_check_time.tv_sec = 0;
server->lag_check_time.tv_usec = 0;
server->lag_next_check = time (NULL) +
weechat_config_integer (irc_config_network_lag_check);
server->lag_last_refresh = 0;
irc_server_set_lag (server);
server->monitor = 0;
server->monitor_time = 0;
if (reconnect
&& IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTORECONNECT))
irc_server_reconnect_schedule (server);
else
{
server->reconnect_delay = 0;
server->reconnect_start = 0;
}
/* discard current nick if no reconnection asked */
if (!reconnect && server->nick)
irc_server_set_nick (server, NULL);
irc_server_set_buffer_title (server);
server->disconnected = 1;
/* send signal "irc_server_disconnected" with server name */
(void) weechat_hook_signal_send ("irc_server_disconnected",
WEECHAT_HOOK_SIGNAL_STRING, server->name);
}
/*
* Disconnects from all servers.
*/
void
irc_server_disconnect_all ()
{
struct t_irc_server *ptr_server;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
irc_server_disconnect (ptr_server, 0, 0);
}
}
/*
* Creates buffers for auto-joined channels on a server.
*/
void
irc_server_autojoin_create_buffers (struct t_irc_server *server)
{
const char *pos_space;
char *autojoin, *autojoin2, **channels;
int num_channels, i;
/* buffers are opened only if no channels are currently opened */
if (server->channels)
return;
/* evaluate server option "autojoin" */
autojoin = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_AUTOJOIN));
/* extract channel names from autojoin option */
if (autojoin && autojoin[0])
{
pos_space = strchr (autojoin, ' ');
autojoin2 = (pos_space) ?
weechat_strndup (autojoin, pos_space - autojoin) :
strdup (autojoin);
if (autojoin2)
{
channels = weechat_string_split (
autojoin2,
",",
NULL,
WEECHAT_STRING_SPLIT_STRIP_LEFT
| WEECHAT_STRING_SPLIT_STRIP_RIGHT
| WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
0,
&num_channels);
if (channels)
{
for (i = 0; i < num_channels; i++)
{
irc_channel_create_buffer (
server, IRC_CHANNEL_TYPE_CHANNEL, channels[i],
1, 1);
}
weechat_string_free_split (channels);
}
free (autojoin2);
}
}
if (autojoin)
free (autojoin);
}
/*
* Autojoins (or auto-rejoins) channels.
*/
void
irc_server_autojoin_channels (struct t_irc_server *server)
{
struct t_irc_channel *ptr_channel;
char *autojoin;
/* auto-join after disconnection (only rejoins opened channels) */
if (!server->disable_autojoin && server->reconnect_join && server->channels)
{
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if ((ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
&& !ptr_channel->part)
{
if (ptr_channel->key)
{
irc_server_sendf (server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"JOIN %s %s",
ptr_channel->name, ptr_channel->key);
}
else
{
irc_server_sendf (server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"JOIN %s",
ptr_channel->name);
}
}
}
server->reconnect_join = 0;
}
else
{
/* auto-join when connecting to server for first time */
autojoin = irc_server_eval_expression (
server,
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_AUTOJOIN));
if (!server->disable_autojoin && autojoin && autojoin[0])
irc_command_join_server (server, autojoin, 0, 0);
if (autojoin)
free (autojoin);
}
server->disable_autojoin = 0;
}
/*
* Returns number of channels for server.
*/
int
irc_server_get_channel_count (struct t_irc_server *server)
{
int count;
struct t_irc_channel *ptr_channel;
count = 0;
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
count++;
}
return count;
}
/*
* Returns number of pv for server.
*/
int
irc_server_get_pv_count (struct t_irc_server *server)
{
int count;
struct t_irc_channel *ptr_channel;
count = 0;
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE)
count++;
}
return count;
}
/*
* Removes away for all channels/nicks (for all servers).
*/
void
irc_server_remove_away (struct t_irc_server *server)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
irc_channel_remove_away (server, ptr_channel);
}
server->last_away_check = 0;
}
}
/*
* Checks for away on all channels of a server.
*/
void
irc_server_check_away (struct t_irc_server *server)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
irc_channel_check_whox (server, ptr_channel);
}
server->last_away_check = time (NULL);
}
}
/*
* Sets/unsets away status for a server (all channels).
*/
void
irc_server_set_away (struct t_irc_server *server, const char *nick, int is_away)
{
struct t_irc_channel *ptr_channel;
if (server->is_connected)
{
/* set/del "away" local variable on server buffer */
if (is_away)
{
weechat_buffer_set (server->buffer,
"localvar_set_away", server->away_message);
}
else
{
weechat_buffer_set (server->buffer,
"localvar_del_away", "");
}
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
/* set away flag for nick on channel */
if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
irc_channel_set_away (server, ptr_channel, nick, is_away);
/* set/del "away" local variable on channel buffer */
if (is_away)
{
weechat_buffer_set (ptr_channel->buffer,
"localvar_set_away", server->away_message);
}
else
{
weechat_buffer_set (ptr_channel->buffer,
"localvar_del_away", "");
}
}
}
}
/*
* Callback called when user sends (file or chat) to someone and that xfer
* plugin successfully initialized xfer and is ready for sending.
*
* In that case, irc plugin sends message to remote nick and wait for "accept"
* reply.
*/
int
irc_server_xfer_send_ready_cb (const void *pointer, void *data,
const char *signal,
const char *type_data, void *signal_data)
{
struct t_infolist *infolist;
struct t_irc_server *ptr_server;
const char *plugin_name, *plugin_id, *type, *filename, *local_address;
char converted_addr[NI_MAXHOST];
struct addrinfo *ainfo;
struct sockaddr_in *saddr;
int spaces_in_name, rc;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) signal;
(void) type_data;
infolist = (struct t_infolist *)signal_data;
if (weechat_infolist_next (infolist))
{
plugin_name = weechat_infolist_string (infolist, "plugin_name");
plugin_id = weechat_infolist_string (infolist, "plugin_id");
if (plugin_name && (strcmp (plugin_name, IRC_PLUGIN_NAME) == 0)
&& plugin_id)
{
ptr_server = irc_server_search (plugin_id);
if (ptr_server)
{
converted_addr[0] = '\0';
local_address = weechat_infolist_string (infolist,
"local_address");
if (local_address)
{
res_init ();
rc = getaddrinfo (local_address, NULL, NULL, &ainfo);
if ((rc == 0) && ainfo && ainfo->ai_addr)
{
if (ainfo->ai_family == AF_INET)
{
/* transform dotted 4 IP address to ulong string */
saddr = (struct sockaddr_in *)ainfo->ai_addr;
snprintf (converted_addr, sizeof (converted_addr),
"%lu",
(unsigned long)ntohl (saddr->sin_addr.s_addr));
}
else
{
snprintf (converted_addr, sizeof (converted_addr),
"%s", local_address);
}
}
}
type = weechat_infolist_string (infolist, "type_string");
if (type && converted_addr[0])
{
/* send DCC PRIVMSG */
if (strcmp (type, "file_send") == 0)
{
filename = weechat_infolist_string (infolist, "filename");
spaces_in_name = (strchr (filename, ' ') != NULL);
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC SEND %s%s%s "
"%s %d %s\01",
weechat_infolist_string (infolist, "remote_nick"),
(spaces_in_name) ? "\"" : "",
filename,
(spaces_in_name) ? "\"" : "",
converted_addr,
weechat_infolist_integer (infolist, "port"),
weechat_infolist_string (infolist, "size"));
}
else if (strcmp (type, "chat_send") == 0)
{
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC CHAT chat %s %d\01",
weechat_infolist_string (infolist, "remote_nick"),
converted_addr,
weechat_infolist_integer (infolist, "port"));
}
}
}
}
}
weechat_infolist_reset_item_cursor (infolist);
return WEECHAT_RC_OK;
}
/*
* Callback called when user receives a file and that resume is possible (file
* is partially received).
*
* In that case, irc plugin sends message to remote nick with resume position.
*/
int
irc_server_xfer_resume_ready_cb (const void *pointer, void *data,
const char *signal,
const char *type_data, void *signal_data)
{
struct t_infolist *infolist;
struct t_irc_server *ptr_server;
const char *plugin_name, *plugin_id, *filename;
int spaces_in_name;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) signal;
(void) type_data;
infolist = (struct t_infolist *)signal_data;
if (weechat_infolist_next (infolist))
{
plugin_name = weechat_infolist_string (infolist, "plugin_name");
plugin_id = weechat_infolist_string (infolist, "plugin_id");
if (plugin_name && (strcmp (plugin_name, IRC_PLUGIN_NAME) == 0) && plugin_id)
{
ptr_server = irc_server_search (plugin_id);
if (ptr_server)
{
filename = weechat_infolist_string (infolist, "filename");
spaces_in_name = (strchr (filename, ' ') != NULL);
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC RESUME %s%s%s %d %s\01",
weechat_infolist_string (infolist, "remote_nick"),
(spaces_in_name) ? "\"" : "",
filename,
(spaces_in_name) ? "\"" : "",
weechat_infolist_integer (infolist, "port"),
weechat_infolist_string (infolist, "start_resume"));
}
}
}
weechat_infolist_reset_item_cursor (infolist);
return WEECHAT_RC_OK;
}
/*
* Callback called when xfer plugin accepted resume request from receiver.
*
* In that case, irc plugin sends accept message to remote nick with resume
* position.
*/
int
irc_server_xfer_send_accept_resume_cb (const void *pointer, void *data,
const char *signal,
const char *type_data,
void *signal_data)
{
struct t_infolist *infolist;
struct t_irc_server *ptr_server;
const char *plugin_name, *plugin_id, *filename;
int spaces_in_name;
/* make C compiler happy */
(void) pointer;
(void) data;
(void) signal;
(void) type_data;
infolist = (struct t_infolist *)signal_data;
if (weechat_infolist_next (infolist))
{
plugin_name = weechat_infolist_string (infolist, "plugin_name");
plugin_id = weechat_infolist_string (infolist, "plugin_id");
if (plugin_name && (strcmp (plugin_name, IRC_PLUGIN_NAME) == 0) && plugin_id)
{
ptr_server = irc_server_search (plugin_id);
if (ptr_server)
{
filename = weechat_infolist_string (infolist, "filename");
spaces_in_name = (strchr (filename, ' ') != NULL);
irc_server_sendf (
ptr_server,
IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
"PRIVMSG %s :\01DCC ACCEPT %s%s%s %d %s\01",
weechat_infolist_string (infolist, "remote_nick"),
(spaces_in_name) ? "\"" : "",
filename,
(spaces_in_name) ? "\"" : "",
weechat_infolist_integer (infolist, "port"),
weechat_infolist_string (infolist, "start_resume"));
}
}
}
weechat_infolist_reset_item_cursor (infolist);
return WEECHAT_RC_OK;
}
/*
* Returns hdata for server.
*/
struct t_hdata *
irc_server_hdata_server_cb (const void *pointer, void *data,
const char *hdata_name)
{
struct t_hdata *hdata;
/* make C compiler happy */
(void) pointer;
(void) data;
hdata = weechat_hdata_new (hdata_name, "prev_server", "next_server",
0, 0, NULL, NULL);
if (hdata)
{
WEECHAT_HDATA_VAR(struct t_irc_server, name, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, options, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, temp_server, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reloading_from_config, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reloaded_from_config, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_eval, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_array, STRING, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, ports_array, INTEGER, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, retry_array, INTEGER, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, index_current_address, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_address, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_ip, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_port, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_retry, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, sock, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, hook_connect, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_fd, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_timer_connection, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_timer_sasl, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, is_connected, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, ssl_connected, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, disconnected, INTEGER, 0, NULL, NULL);
#ifdef HAVE_GNUTLS
WEECHAT_HDATA_VAR(struct t_irc_server, gnutls_sess, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, tls_cert, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, tls_cert_key, OTHER, 0, NULL, NULL);
#endif /* HAVE_GNUTLS */
WEECHAT_HDATA_VAR(struct t_irc_server, unterminated_message, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nicks_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nicks_array, STRING, 0, "nicks_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_first_tried, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_alternate_number, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_modes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, host, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, checking_cap_ls, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cap_ls, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, checking_cap_list, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cap_list, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, isupport, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, prefix_modes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, prefix_chars, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, user_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, host_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, casemapping, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, chantypes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, chanmodes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, monitor, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, monitor_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_delay, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_start, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, command_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_join, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, disable_autojoin, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, is_away, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, away_message, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, away_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_displayed, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_check_time, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_next_check, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_last_refresh, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cmd_list_regexp, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_user_message, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_away_check, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_data_purge, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, outqueue, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_outqueue, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, redirects, POINTER, 0, NULL, "irc_redirect");
WEECHAT_HDATA_VAR(struct t_irc_server, last_redirect, POINTER, 0, NULL, "irc_redirect");
WEECHAT_HDATA_VAR(struct t_irc_server, notify_list, POINTER, 0, NULL, "irc_notify");
WEECHAT_HDATA_VAR(struct t_irc_server, last_notify, POINTER, 0, NULL, "irc_notify");
WEECHAT_HDATA_VAR(struct t_irc_server, notify_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_manual, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_channel_key, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_noswitch, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, buffer, POINTER, 0, NULL, "buffer");
WEECHAT_HDATA_VAR(struct t_irc_server, buffer_as_string, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, channels, POINTER, 0, NULL, "irc_channel");
WEECHAT_HDATA_VAR(struct t_irc_server, last_channel, POINTER, 0, NULL, "irc_channel");
WEECHAT_HDATA_VAR(struct t_irc_server, prev_server, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_VAR(struct t_irc_server, next_server, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_LIST(irc_servers, WEECHAT_HDATA_LIST_CHECK_POINTERS);
WEECHAT_HDATA_LIST(last_irc_server, 0);
}
return hdata;
}
/*
* Adds a server in an infolist.
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_server_add_to_infolist (struct t_infolist *infolist,
struct t_irc_server *server)
{
struct t_infolist_item *ptr_item;
if (!infolist || !server)
return 0;
ptr_item = weechat_infolist_new_item (infolist);
if (!ptr_item)
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "name", server->name))
return 0;
if (!weechat_infolist_new_var_pointer (ptr_item, "buffer", server->buffer))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "buffer_name",
(server->buffer) ?
weechat_buffer_get_string (server->buffer, "name") : ""))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "buffer_short_name",
(server->buffer) ?
weechat_buffer_get_string (server->buffer, "short_name") : ""))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "addresses",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_ADDRESSES)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "proxy",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PROXY)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ipv6",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_IPV6)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_cert",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_CERT)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_password",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_PASSWORD)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_priorities",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_PRIORITIES)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl_dhkey_size",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "ssl_fingerprint",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_FINGERPRINT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl_verify",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL_VERIFY)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "password",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_PASSWORD)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "capabilities",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_CAPABILITIES)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "sasl_mechanism",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_MECHANISM)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "sasl_username",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_USERNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "sasl_password",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_PASSWORD)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "sasl_key",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_KEY)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "sasl_fail",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_FAIL)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autoconnect",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTOCONNECT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autoreconnect",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTORECONNECT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autoreconnect_delay",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AUTORECONNECT_DELAY)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "nicks",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_NICKS)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "nicks_alternate",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_NICKS_ALTERNATE)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "username",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "realname",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_REALNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "local_hostname",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_LOCAL_HOSTNAME)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "usermode",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERMODE)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "command",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_COMMAND)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "command_delay",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_COMMAND_DELAY)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "autojoin",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_AUTOJOIN)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autorejoin",
IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTOREJOIN)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "autorejoin_delay",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AUTOREJOIN_DELAY)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "connection_timeout",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_CONNECTION_TIMEOUT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "anti_flood_prio_high",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "anti_flood_prio_low",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "away_check",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AWAY_CHECK)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "away_check_max_nicks",
IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "msg_kick",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_MSG_KICK)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "msg_part",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_MSG_PART)))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "msg_quit",
IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_MSG_QUIT)))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "temp_server", server->temp_server))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "index_current_address", server->index_current_address))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "current_address", server->current_address))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "current_ip", server->current_ip))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "current_port", server->current_port))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "current_retry", server->current_retry))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "sock", server->sock))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "is_connected", server->is_connected))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "ssl_connected", server->ssl_connected))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "disconnected", server->disconnected))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "unterminated_message", server->unterminated_message))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "nick", server->nick))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "nick_modes", server->nick_modes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "host", server->host))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "checking_cap_ls", server->checking_cap_ls))
return 0;
if (!weechat_hashtable_add_to_infolist (server->cap_ls, ptr_item, "cap_ls"))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "checking_cap_list", server->checking_cap_list))
return 0;
if (!weechat_hashtable_add_to_infolist (server->cap_list, ptr_item, "cap_list"))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "isupport", server->isupport))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefix_modes", server->prefix_modes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefix_chars", server->prefix_chars))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "nick_max_length", server->nick_max_length))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "user_max_length", server->user_max_length))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "host_max_length", server->host_max_length))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "casemapping", server->casemapping))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "casemapping_string", irc_server_casemapping_string[server->casemapping]))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "chantypes", server->chantypes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "chanmodes", server->chanmodes))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "monitor", server->monitor))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "monitor_time", server->monitor_time))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "reconnect_delay", server->reconnect_delay))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "reconnect_start", server->reconnect_start))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "command_time", server->command_time))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "reconnect_join", server->reconnect_join))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "disable_autojoin", server->disable_autojoin))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "is_away", server->is_away))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "away_message", server->away_message))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "away_time", server->away_time))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "lag", server->lag))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "lag_displayed", server->lag_displayed))
return 0;
if (!weechat_infolist_new_var_buffer (ptr_item, "lag_check_time", &(server->lag_check_time), sizeof (struct timeval)))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "lag_next_check", server->lag_next_check))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "lag_last_refresh", server->lag_last_refresh))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "last_user_message", server->last_user_message))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "last_away_check", server->last_away_check))
return 0;
if (!weechat_infolist_new_var_time (ptr_item, "last_data_purge", server->last_data_purge))
return 0;
return 1;
}
/*
* Prints server infos in WeeChat log file (usually for crash dump).
*/
void
irc_server_print_log ()
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
int i;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
weechat_log_printf ("");
weechat_log_printf ("[server %s (addr:0x%lx)]", ptr_server->name, ptr_server);
/* addresses */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_ADDRESSES]))
weechat_log_printf (" addresses. . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_ADDRESSES));
else
weechat_log_printf (" addresses. . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_ADDRESSES]));
/* proxy */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_PROXY]))
weechat_log_printf (" proxy. . . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_PROXY));
else
weechat_log_printf (" proxy. . . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_PROXY]));
/* ipv6 */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_IPV6]))
weechat_log_printf (" ipv6 . . . . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_IPV6)) ?
"on" : "off");
else
weechat_log_printf (" ipv6 . . . . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_IPV6])) ?
"on" : "off");
/* ssl */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL]))
weechat_log_printf (" ssl. . . . . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_SSL)) ?
"on" : "off");
else
weechat_log_printf (" ssl. . . . . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_SSL])) ?
"on" : "off");
/* ssl_cert */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_CERT]))
weechat_log_printf (" ssl_cert . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SSL_CERT));
else
weechat_log_printf (" ssl_cert . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SSL_CERT]));
/* ssl_password */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_PASSWORD]))
weechat_log_printf (" ssl_password . . . . : null");
else
weechat_log_printf (" ssl_password . . . . : (hidden)");
/* ssl_priorities */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_PRIORITIES]))
weechat_log_printf (" ssl_priorities . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SSL_PRIORITIES));
else
weechat_log_printf (" ssl_priorities . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SSL_PRIORITIES]));
/* ssl_dhkey_size */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_DHKEY_SIZE]))
weechat_log_printf (" ssl_dhkey_size . . . : null ('%d')",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE));
else
weechat_log_printf (" ssl_dhkey_size . . . : '%d'",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_SSL_DHKEY_SIZE]));
/* ssl_fingerprint */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_FINGERPRINT]))
weechat_log_printf (" ssl_fingerprint. . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SSL_FINGERPRINT));
else
weechat_log_printf (" ssl_fingerprint. . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SSL_FINGERPRINT]));
/* ssl_verify */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SSL_VERIFY]))
weechat_log_printf (" ssl_verify . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_SSL_VERIFY)) ?
"on" : "off");
else
weechat_log_printf (" ssl_verify . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_SSL_VERIFY])) ?
"on" : "off");
/* password */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_PASSWORD]))
weechat_log_printf (" password . . . . . . : null");
else
weechat_log_printf (" password . . . . . . : (hidden)");
/* client capabilities */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_CAPABILITIES]))
weechat_log_printf (" capabilities . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_CAPABILITIES));
else
weechat_log_printf (" capabilities . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_CAPABILITIES]));
/* sasl_mechanism */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_MECHANISM]))
weechat_log_printf (" sasl_mechanism . . . : null ('%s')",
irc_sasl_mechanism_string[IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_SASL_MECHANISM)]);
else
weechat_log_printf (" sasl_mechanism . . . : '%s'",
irc_sasl_mechanism_string[weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_SASL_MECHANISM])]);
/* sasl_username */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_USERNAME]))
weechat_log_printf (" sasl_username. . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SASL_USERNAME));
else
weechat_log_printf (" sasl_username. . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SASL_USERNAME]));
/* sasl_password */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_PASSWORD]))
weechat_log_printf (" sasl_password. . . . : null");
else
weechat_log_printf (" sasl_password. . . . : (hidden)");
/* sasl_key */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_KEY]))
weechat_log_printf (" sasl_key. . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_SASL_KEY));
else
weechat_log_printf (" sasl_key. . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_SASL_KEY]));
/* sasl_fail */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_SASL_FAIL]))
weechat_log_printf (" sasl_fail. . . . . . : null ('%s')",
irc_server_sasl_fail_string[IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_SASL_FAIL)]);
else
weechat_log_printf (" sasl_fail. . . . . . : '%s'",
irc_server_sasl_fail_string[weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_SASL_FAIL])]);
/* autoconnect */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOCONNECT]))
weechat_log_printf (" autoconnect. . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTOCONNECT)) ?
"on" : "off");
else
weechat_log_printf (" autoconnect. . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_AUTOCONNECT])) ?
"on" : "off");
/* autoreconnect */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT]))
weechat_log_printf (" autoreconnect. . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTORECONNECT)) ?
"on" : "off");
else
weechat_log_printf (" autoreconnect. . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT])) ?
"on" : "off");
/* autoreconnect_delay */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT_DELAY]))
weechat_log_printf (" autoreconnect_delay. : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AUTORECONNECT_DELAY));
else
weechat_log_printf (" autoreconnect_delay. : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AUTORECONNECT_DELAY]));
/* nicks */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_NICKS]))
weechat_log_printf (" nicks. . . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_NICKS));
else
weechat_log_printf (" nicks. . . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_NICKS]));
/* nicks_alternate */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_NICKS_ALTERNATE]))
weechat_log_printf (" nicks_alternate. . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_NICKS_ALTERNATE)) ?
"on" : "off");
else
weechat_log_printf (" nicks_alternate. . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_NICKS_ALTERNATE])) ?
"on" : "off");
/* username */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_USERNAME]))
weechat_log_printf (" username . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_USERNAME));
else
weechat_log_printf (" username . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_USERNAME]));
/* realname */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_REALNAME]))
weechat_log_printf (" realname . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_REALNAME));
else
weechat_log_printf (" realname . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_REALNAME]));
/* local_hostname */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_LOCAL_HOSTNAME]))
weechat_log_printf (" local_hostname . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_LOCAL_HOSTNAME));
else
weechat_log_printf (" local_hostname . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_LOCAL_HOSTNAME]));
/* usermode */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_USERMODE]))
weechat_log_printf (" usermode . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_USERMODE));
else
weechat_log_printf (" usermode . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_USERMODE]));
/* command */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_COMMAND]))
weechat_log_printf (" command. . . . . . . : null");
else
weechat_log_printf (" command. . . . . . . : (hidden)");
/* command_delay */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_COMMAND_DELAY]))
weechat_log_printf (" command_delay. . . . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_COMMAND_DELAY));
else
weechat_log_printf (" command_delay. . . . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_COMMAND_DELAY]));
/* autojoin */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOJOIN]))
weechat_log_printf (" autojoin . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_AUTOJOIN));
else
weechat_log_printf (" autojoin . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_AUTOJOIN]));
/* autorejoin */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN]))
weechat_log_printf (" autorejoin . . . . . : null (%s)",
(IRC_SERVER_OPTION_BOOLEAN(ptr_server, IRC_SERVER_OPTION_AUTOREJOIN)) ?
"on" : "off");
else
weechat_log_printf (" autorejoin . . . . . : %s",
(weechat_config_boolean (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN])) ?
"on" : "off");
/* autorejoin_delay */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN_DELAY]))
weechat_log_printf (" autorejoin_delay . . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AUTOREJOIN_DELAY));
else
weechat_log_printf (" autorejoin_delay . . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AUTOREJOIN_DELAY]));
/* connection_timeout */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_CONNECTION_TIMEOUT]))
weechat_log_printf (" connection_timeout . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_CONNECTION_TIMEOUT));
else
weechat_log_printf (" connection_timeout . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_CONNECTION_TIMEOUT]));
/* anti_flood_prio_high */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH]))
weechat_log_printf (" anti_flood_prio_high : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH));
else
weechat_log_printf (" anti_flood_prio_high : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_HIGH]));
/* anti_flood_prio_low */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW]))
weechat_log_printf (" anti_flood_prio_low. : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW));
else
weechat_log_printf (" anti_flood_prio_low. : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_ANTI_FLOOD_PRIO_LOW]));
/* away_check */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK]))
weechat_log_printf (" away_check . . . . . : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AWAY_CHECK));
else
weechat_log_printf (" away_check . . . . . : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK]));
/* away_check_max_nicks */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS]))
weechat_log_printf (" away_check_max_nicks : null (%d)",
IRC_SERVER_OPTION_INTEGER(ptr_server, IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS));
else
weechat_log_printf (" away_check_max_nicks : %d",
weechat_config_integer (ptr_server->options[IRC_SERVER_OPTION_AWAY_CHECK_MAX_NICKS]));
/* msg_kick */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_MSG_KICK]))
weechat_log_printf (" msg_kick . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_MSG_KICK));
else
weechat_log_printf (" msg_kick . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_MSG_KICK]));
/* msg_part */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_MSG_PART]))
weechat_log_printf (" msg_part . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_MSG_PART));
else
weechat_log_printf (" msg_part . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_MSG_PART]));
/* msg_quit */
if (weechat_config_option_is_null (ptr_server->options[IRC_SERVER_OPTION_MSG_QUIT]))
weechat_log_printf (" msg_quit . . . . . . : null ('%s')",
IRC_SERVER_OPTION_STRING(ptr_server, IRC_SERVER_OPTION_MSG_QUIT));
else
weechat_log_printf (" msg_quit . . . . . . : '%s'",
weechat_config_string (ptr_server->options[IRC_SERVER_OPTION_MSG_QUIT]));
/* other server variables */
weechat_log_printf (" temp_server. . . . . : %d", ptr_server->temp_server);
weechat_log_printf (" reloading_from_config: %d", ptr_server->reloaded_from_config);
weechat_log_printf (" reloaded_from_config : %d", ptr_server->reloaded_from_config);
weechat_log_printf (" addresses_eval . . . : '%s'", ptr_server->addresses_eval);
weechat_log_printf (" addresses_count. . . : %d", ptr_server->addresses_count);
weechat_log_printf (" addresses_array. . . : 0x%lx", ptr_server->addresses_array);
weechat_log_printf (" ports_array. . . . . : 0x%lx", ptr_server->ports_array);
weechat_log_printf (" retry_array. . . . . : 0x%lx", ptr_server->retry_array);
weechat_log_printf (" index_current_address: %d", ptr_server->index_current_address);
weechat_log_printf (" current_address. . . : '%s'", ptr_server->current_address);
weechat_log_printf (" current_ip . . . . . : '%s'", ptr_server->current_ip);
weechat_log_printf (" current_port . . . . : %d", ptr_server->current_port);
weechat_log_printf (" current_retry. . . . : %d", ptr_server->current_retry);
weechat_log_printf (" sock . . . . . . . . : %d", ptr_server->sock);
weechat_log_printf (" hook_connect . . . . : 0x%lx", ptr_server->hook_connect);
weechat_log_printf (" hook_fd. . . . . . . : 0x%lx", ptr_server->hook_fd);
weechat_log_printf (" hook_timer_connection: 0x%lx", ptr_server->hook_timer_connection);
weechat_log_printf (" hook_timer_sasl. . . : 0x%lx", ptr_server->hook_timer_sasl);
weechat_log_printf (" is_connected . . . . : %d", ptr_server->is_connected);
weechat_log_printf (" ssl_connected. . . . : %d", ptr_server->ssl_connected);
weechat_log_printf (" disconnected . . . . : %d", ptr_server->disconnected);
#ifdef HAVE_GNUTLS
weechat_log_printf (" gnutls_sess. . . . . : 0x%lx", ptr_server->gnutls_sess);
#endif /* HAVE_GNUTLS */
weechat_log_printf (" unterminated_message : '%s'", ptr_server->unterminated_message);
weechat_log_printf (" nicks_count. . . . . : %d", ptr_server->nicks_count);
weechat_log_printf (" nicks_array. . . . . : 0x%lx", ptr_server->nicks_array);
weechat_log_printf (" nick_first_tried . . : %d", ptr_server->nick_first_tried);
weechat_log_printf (" nick_alternate_number: %d", ptr_server->nick_alternate_number);
weechat_log_printf (" nick . . . . . . . . : '%s'", ptr_server->nick);
weechat_log_printf (" nick_modes . . . . . : '%s'", ptr_server->nick_modes);
weechat_log_printf (" host . . . . . . . . : '%s'", ptr_server->host);
weechat_log_printf (" checking_cap_ls. . . : %d", ptr_server->checking_cap_ls);
weechat_log_printf (" cap_ls . . . . . . . : 0x%lx (hashtable: '%s')",
ptr_server->cap_ls,
weechat_hashtable_get_string (ptr_server->cap_ls, "keys_values"));
weechat_log_printf (" checking_cap_list. . : %d", ptr_server->checking_cap_list);
weechat_log_printf (" cap_list . . . . . . : 0x%lx (hashtable: '%s')",
ptr_server->cap_list,
weechat_hashtable_get_string (ptr_server->cap_list, "keys_values"));
weechat_log_printf (" isupport . . . . . . : '%s'", ptr_server->isupport);
weechat_log_printf (" prefix_modes . . . . : '%s'", ptr_server->prefix_modes);
weechat_log_printf (" prefix_chars . . . . : '%s'", ptr_server->prefix_chars);
weechat_log_printf (" nick_max_length. . . : %d", ptr_server->nick_max_length);
weechat_log_printf (" user_max_length. . . : %d", ptr_server->user_max_length);
weechat_log_printf (" host_max_length. . . : %d", ptr_server->host_max_length);
weechat_log_printf (" casemapping. . . . . : %d (%s)",
ptr_server->casemapping,
irc_server_casemapping_string[ptr_server->casemapping]);
weechat_log_printf (" chantypes. . . . . . : '%s'", ptr_server->chantypes);
weechat_log_printf (" chanmodes. . . . . . : '%s'", ptr_server->chanmodes);
weechat_log_printf (" monitor. . . . . . . : %d", ptr_server->monitor);
weechat_log_printf (" monitor_time . . . . : %lld", (long long)ptr_server->monitor_time);
weechat_log_printf (" reconnect_delay. . . : %d", ptr_server->reconnect_delay);
weechat_log_printf (" reconnect_start. . . : %lld", (long long)ptr_server->reconnect_start);
weechat_log_printf (" command_time . . . . : %lld", (long long)ptr_server->command_time);
weechat_log_printf (" reconnect_join . . . : %d", ptr_server->reconnect_join);
weechat_log_printf (" disable_autojoin . . : %d", ptr_server->disable_autojoin);
weechat_log_printf (" is_away. . . . . . . : %d", ptr_server->is_away);
weechat_log_printf (" away_message . . . . : '%s'", ptr_server->away_message);
weechat_log_printf (" away_time. . . . . . : %lld", (long long)ptr_server->away_time);
weechat_log_printf (" lag. . . . . . . . . : %d", ptr_server->lag);
weechat_log_printf (" lag_displayed. . . . : %d", ptr_server->lag_displayed);
weechat_log_printf (" lag_check_time . . . : tv_sec:%d, tv_usec:%d",
ptr_server->lag_check_time.tv_sec,
ptr_server->lag_check_time.tv_usec);
weechat_log_printf (" lag_next_check . . . : %lld", (long long)ptr_server->lag_next_check);
weechat_log_printf (" lag_last_refresh . . : %lld", (long long)ptr_server->lag_last_refresh);
weechat_log_printf (" cmd_list_regexp. . . : 0x%lx", ptr_server->cmd_list_regexp);
weechat_log_printf (" last_user_message. . : %lld", (long long)ptr_server->last_user_message);
weechat_log_printf (" last_away_check. . . : %lld", (long long)ptr_server->last_away_check);
weechat_log_printf (" last_data_purge. . . : %lld", (long long)ptr_server->last_data_purge);
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
weechat_log_printf (" outqueue[%02d] . . . . : 0x%lx", i, ptr_server->outqueue[i]);
weechat_log_printf (" last_outqueue[%02d]. . : 0x%lx", i, ptr_server->last_outqueue[i]);
}
weechat_log_printf (" redirects. . . . . . : 0x%lx", ptr_server->redirects);
weechat_log_printf (" last_redirect. . . . : 0x%lx", ptr_server->last_redirect);
weechat_log_printf (" notify_list. . . . . : 0x%lx", ptr_server->notify_list);
weechat_log_printf (" last_notify. . . . . : 0x%lx", ptr_server->last_notify);
weechat_log_printf (" notify_count . . . . : %d", ptr_server->notify_count);
weechat_log_printf (" join_manual. . . . . : 0x%lx (hashtable: '%s')",
ptr_server->join_manual,
weechat_hashtable_get_string (ptr_server->join_manual, "keys_values"));
weechat_log_printf (" join_channel_key . . : 0x%lx (hashtable: '%s')",
ptr_server->join_channel_key,
weechat_hashtable_get_string (ptr_server->join_channel_key, "keys_values"));
weechat_log_printf (" join_noswitch. . . . : 0x%lx (hashtable: '%s')",
ptr_server->join_noswitch,
weechat_hashtable_get_string (ptr_server->join_noswitch, "keys_values"));
weechat_log_printf (" buffer . . . . . . . : 0x%lx", ptr_server->buffer);
weechat_log_printf (" buffer_as_string . . : 0x%lx", ptr_server->buffer_as_string);
weechat_log_printf (" channels . . . . . . : 0x%lx", ptr_server->channels);
weechat_log_printf (" last_channel . . . . : 0x%lx", ptr_server->last_channel);
weechat_log_printf (" prev_server. . . . . : 0x%lx", ptr_server->prev_server);
weechat_log_printf (" next_server. . . . . : 0x%lx", ptr_server->next_server);
irc_redirect_print_log (ptr_server);
irc_notify_print_log (ptr_server);
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
irc_channel_print_log (ptr_channel);
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4697_3 |
crossvul-cpp_data_good_4588_2 | /*
* The Python Imaging Library.
* $Id$
*
* decoder for PCX image data.
*
* history:
* 95-09-14 fl Created
*
* Copyright (c) Fredrik Lundh 1995.
* Copyright (c) Secret Labs AB 1997.
*
* See the README file for information on usage and redistribution.
*/
#include "Imaging.h"
int
ImagingPcxDecode(Imaging im, ImagingCodecState state, UINT8* buf, Py_ssize_t bytes)
{
UINT8 n;
UINT8* ptr;
if (strcmp(im->mode, "1") == 0 && state->xsize > state->bytes * 8) {
state->errcode = IMAGING_CODEC_OVERRUN;
return -1;
} else if (strcmp(im->mode, "P") == 0 && state->xsize > state->bytes) {
state->errcode = IMAGING_CODEC_OVERRUN;
return -1;
}
ptr = buf;
for (;;) {
if (bytes < 1)
return ptr - buf;
if ((*ptr & 0xC0) == 0xC0) {
/* Run */
if (bytes < 2)
return ptr - buf;
n = ptr[0] & 0x3F;
while (n > 0) {
if (state->x >= state->bytes) {
state->errcode = IMAGING_CODEC_OVERRUN;
break;
}
state->buffer[state->x++] = ptr[1];
n--;
}
ptr += 2; bytes -= 2;
} else {
/* Literal */
state->buffer[state->x++] = ptr[0];
ptr++; bytes--;
}
if (state->x >= state->bytes) {
if (state->bytes % state->xsize && state->bytes > state->xsize) {
int bands = state->bytes / state->xsize;
int stride = state->bytes / bands;
int i;
for (i=1; i< bands; i++) { // note -- skipping first band
memmove(&state->buffer[i*state->xsize],
&state->buffer[i*stride],
state->xsize);
}
}
/* Got a full line, unpack it */
state->shuffle((UINT8*) im->image[state->y + state->yoff] +
state->xoff * im->pixelsize, state->buffer,
state->xsize);
state->x = 0;
if (++state->y >= state->ysize) {
/* End of file (errcode = 0) */
return -1;
}
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4588_2 |
crossvul-cpp_data_bad_3927_1 | /**
* @file atecc608a-tnglora-se.c
*
* @brief ATECC608A-TNGLORA Secure Element hardware implementation
*
* @remark Current implementation only supports LoRaWAN 1.0.x version
*
* @copyright Copyright (c) 2020 The Things Industries B.V.
*
* Revised BSD License
* Copyright The Things Industries B.V 2020. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Things Industries B.V nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE THINGS INDUSTRIES B.V BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "atca_basic.h"
#include "cryptoauthlib.h"
#include "atca_devtypes.h"
#include "secure-element.h"
#include "se-identity.h"
#include "atecc608a-tnglora-se-hal.h"
/*!
* Number of supported crypto keys
*/
#define NUM_OF_KEYS 15
#define DEV_EUI_ASCII_SIZE_BYTE 16U
/*!
* Identifier value pair type for Keys
*/
typedef struct sKey
{
/*
* Key identifier (used for maping the stack MAC key to the ATECC608A-TNGLoRaWAN slot)
*/
KeyIdentifier_t KeyID;
/*
* Key slot number
*/
uint16_t KeySlotNumber;
/*
* Key block index within slot (each block can contain two keys, so index is either 0 or 1)
*/
uint8_t KeyBlockIndex;
} Key_t;
/*
* Secure Element Non Volatile Context structure
*/
typedef struct sSecureElementNvCtx
{
/*!
* DevEUI storage
*/
uint8_t DevEui[SE_EUI_SIZE];
/*!
* Join EUI storage
*/
uint8_t JoinEui[SE_EUI_SIZE];
/*!
* Pin storage
*/
uint8_t Pin[SE_PIN_SIZE];
/*!
* CMAC computation context variable
*/
atca_aes_cmac_ctx_t AtcaAesCmacCtx;
/*!
* LoRaWAN key list
*/
Key_t KeyList[NUM_OF_KEYS];
} SecureElementNvCtx_t;
/*!
* Secure element context
*/
static SecureElementNvCtx_t SeNvmCtx = {
/*!
* end-device IEEE EUI (big endian)
*/
.DevEui = { 0 },
/*!
* App/Join server IEEE EUI (big endian)
*/
.JoinEui = { 0 },
/*!
* Secure-element pin (big endian)
*/
.Pin = SECURE_ELEMENT_PIN,
/*!
* LoRaWAN key list
*/
.KeyList = ATECC608A_SE_KEY_LIST
};
static SecureElementNvmEvent SeNvmCtxChanged;
static ATCAIfaceCfg atecc608_i2c_config;
static ATCA_STATUS convert_ascii_devEUI( uint8_t* devEUI_ascii, uint8_t* devEUI );
static ATCA_STATUS atcab_read_joinEUI( uint8_t* joinEUI )
{
ATCA_STATUS status = ATCA_GEN_FAIL;
uint8_t read_buf[ATCA_BLOCK_SIZE];
if( joinEUI == NULL )
{
return ATCA_BAD_PARAM;
}
do
{
status = atcab_read_zone( ATCA_ZONE_DATA, TNGLORA_JOIN_EUI_SLOT, 0, 0, read_buf, ATCA_BLOCK_SIZE );
if( status != ATCA_SUCCESS )
{
break;
}
memcpy1( joinEUI, read_buf, SE_EUI_SIZE );
} while( 0 );
return status;
}
static ATCA_STATUS atcab_read_ascii_devEUI( uint8_t* devEUI_ascii )
{
ATCA_STATUS status = ATCA_GEN_FAIL;
uint8_t read_buf[ATCA_BLOCK_SIZE];
if( devEUI_ascii == NULL )
{
return ATCA_BAD_PARAM;
}
do
{
status = atcab_read_zone( ATCA_ZONE_DATA, TNGLORA_DEV_EUI_SLOT, 0, 0, read_buf, ATCA_BLOCK_SIZE );
if( status != ATCA_SUCCESS )
{
break;
}
memcpy1( devEUI_ascii, read_buf, DEV_EUI_ASCII_SIZE_BYTE );
} while( 0 );
return status;
}
static ATCA_STATUS convert_ascii_devEUI( uint8_t* devEUI_ascii, uint8_t* devEUI )
{
for( size_t pos = 0; pos < DEV_EUI_ASCII_SIZE_BYTE; pos += 2 )
{
uint8_t temp = 0;
if( ( devEUI_ascii[pos] >= '0' ) && ( devEUI_ascii[pos] <= '9' ) )
{
temp = ( devEUI_ascii[pos] - '0' ) << 4;
}
else if( ( devEUI_ascii[pos] >= 'A' ) && ( devEUI_ascii[pos] <= 'F' ) )
{
temp = ( ( devEUI_ascii[pos] - 'A' ) + 10 ) << 4;
}
else
{
return ATCA_BAD_PARAM;
}
if( ( devEUI_ascii[pos + 1] >= '0' ) && ( devEUI_ascii[pos + 1] <= '9' ) )
{
temp |= devEUI_ascii[pos + 1] - '0';
}
else if( ( devEUI_ascii[pos + 1] >= 'A' ) && ( devEUI_ascii[pos + 1] <= 'F' ) )
{
temp |= ( devEUI_ascii[pos + 1] - 'A' ) + 10;
}
else
{
return ATCA_BAD_PARAM;
}
devEUI[pos / 2] = temp;
}
return ATCA_SUCCESS;
}
static ATCA_STATUS atcab_read_devEUI( uint8_t* devEUI )
{
ATCA_STATUS status = ATCA_GEN_FAIL;
uint8_t devEUI_ascii[DEV_EUI_ASCII_SIZE_BYTE];
status = atcab_read_ascii_devEUI( devEUI_ascii );
if( status != ATCA_SUCCESS )
{
return status;
}
status = convert_ascii_devEUI( devEUI_ascii, devEUI );
return status;
}
/*
* Gets key item from key list.
*
* cmac = aes128_cmac(keyID, B0 | msg)
*
* \param[IN] keyID - Key identifier
* \param[OUT] keyItem - Key item reference
* \retval - Status of the operation
*/
SecureElementStatus_t GetKeyByID( KeyIdentifier_t keyID, Key_t** keyItem )
{
for( uint8_t i = 0; i < NUM_OF_KEYS; i++ )
{
if( SeNvmCtx.KeyList[i].KeyID == keyID )
{
*keyItem = &( SeNvmCtx.KeyList[i] );
return SECURE_ELEMENT_SUCCESS;
}
}
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
/*
* Dummy callback in case if the user provides NULL function pointer
*/
static void DummyCB( void )
{
return;
}
/*
* Computes a CMAC of a message using provided initial Bx block
*
* cmac = aes128_cmac(keyID, blocks[i].Buffer)
*
* \param[IN] micBxBuffer - Buffer containing the initial Bx block
* \param[IN] buffer - Data buffer
* \param[IN] size - Data buffer size
* \param[IN] keyID - Key identifier to determine the AES key to be used
* \param[OUT] cmac - Computed cmac
* \retval - Status of the operation
*/
static SecureElementStatus_t ComputeCmac( uint8_t* micBxBuffer, uint8_t* buffer, uint16_t size, KeyIdentifier_t keyID,
uint32_t* cmac )
{
if( ( buffer == NULL ) || ( cmac == NULL ) )
{
return SECURE_ELEMENT_ERROR_NPE;
}
uint8_t Cmac[16] = { 0 };
Key_t* keyItem;
SecureElementStatus_t retval = GetKeyByID( keyID, &keyItem );
if( retval != SECURE_ELEMENT_SUCCESS )
{
return retval;
}
ATCA_STATUS status =
atcab_aes_cmac_init( &SeNvmCtx.AtcaAesCmacCtx, keyItem->KeySlotNumber, keyItem->KeyBlockIndex );
if( ATCA_SUCCESS == status )
{
if( micBxBuffer != NULL )
{
atcab_aes_cmac_update( &SeNvmCtx.AtcaAesCmacCtx, micBxBuffer, 16 );
}
atcab_aes_cmac_update( &SeNvmCtx.AtcaAesCmacCtx, buffer, size );
atcab_aes_cmac_finish( &SeNvmCtx.AtcaAesCmacCtx, Cmac, 16 );
*cmac = ( uint32_t )( ( uint32_t ) Cmac[3] << 24 | ( uint32_t ) Cmac[2] << 16 | ( uint32_t ) Cmac[1] << 8 |
( uint32_t ) Cmac[0] );
return SECURE_ELEMENT_SUCCESS;
}
else
{
return SECURE_ELEMENT_ERROR;
}
}
SecureElementStatus_t SecureElementInit( SecureElementNvmEvent seNvmCtxChanged )
{
#if !defined( SECURE_ELEMENT_PRE_PROVISIONED )
#error "ATECC608A is always pre-provisioned. Please set SECURE_ELEMENT_PRE_PROVISIONED to ON"
#endif
atecc608_i2c_config.iface_type = ATCA_I2C_IFACE;
atecc608_i2c_config.atcai2c.baud = ATCA_HAL_ATECC608A_I2C_FREQUENCY;
atecc608_i2c_config.atcai2c.bus = ATCA_HAL_ATECC608A_I2C_BUS_PINS;
atecc608_i2c_config.atcai2c.slave_address = ATCA_HAL_ATECC608A_I2C_ADDRESS;
atecc608_i2c_config.devtype = ATECC608A;
atecc608_i2c_config.rx_retries = ATCA_HAL_ATECC608A_I2C_RX_RETRIES;
atecc608_i2c_config.wake_delay = ATCA_HAL_ATECC608A_I2C_WAKEUP_DELAY;
if( atcab_init( &atecc608_i2c_config ) != ATCA_SUCCESS )
{
return SECURE_ELEMENT_ERROR;
}
if( atcab_read_devEUI( SeNvmCtx.DevEui ) != ATCA_SUCCESS )
{
return SECURE_ELEMENT_ERROR;
}
if( atcab_read_joinEUI( SeNvmCtx.JoinEui ) != ATCA_SUCCESS )
{
return SECURE_ELEMENT_ERROR;
}
// Assign callback
if( seNvmCtxChanged != 0 )
{
SeNvmCtxChanged = seNvmCtxChanged;
}
else
{
SeNvmCtxChanged = DummyCB;
}
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementRestoreNvmCtx( void* seNvmCtx )
{
// Restore nvm context
if( seNvmCtx != 0 )
{
memcpy1( ( uint8_t* ) &SeNvmCtx, ( uint8_t* ) seNvmCtx, sizeof( SeNvmCtx ) );
return SECURE_ELEMENT_SUCCESS;
}
else
{
return SECURE_ELEMENT_ERROR_NPE;
}
}
void* SecureElementGetNvmCtx( size_t* seNvmCtxSize )
{
*seNvmCtxSize = sizeof( SeNvmCtx );
return &SeNvmCtx;
}
SecureElementStatus_t SecureElementSetKey( KeyIdentifier_t keyID, uint8_t* key )
{
// No key setting for HW SE, can only derive keys
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementComputeAesCmac( uint8_t* micBxBuffer, uint8_t* buffer, uint16_t size,
KeyIdentifier_t keyID, uint32_t* cmac )
{
if( keyID >= LORAMAC_CRYPTO_MULTICAST_KEYS )
{
// Never accept multicast key identifier for cmac computation
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
return ComputeCmac( micBxBuffer, buffer, size, keyID, cmac );
}
SecureElementStatus_t SecureElementVerifyAesCmac( uint8_t* buffer, uint16_t size, uint32_t expectedCmac,
KeyIdentifier_t keyID )
{
if( buffer == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
SecureElementStatus_t retval = SECURE_ELEMENT_ERROR;
uint32_t compCmac = 0;
retval = ComputeCmac( NULL, buffer, size, keyID, &compCmac );
if( retval != SECURE_ELEMENT_SUCCESS )
{
return retval;
}
if( expectedCmac != compCmac )
{
retval = SECURE_ELEMENT_FAIL_CMAC;
}
return retval;
}
SecureElementStatus_t SecureElementAesEncrypt( uint8_t* buffer, uint16_t size, KeyIdentifier_t keyID,
uint8_t* encBuffer )
{
if( buffer == NULL || encBuffer == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Check if the size is divisible by 16,
if( ( size % 16 ) != 0 )
{
return SECURE_ELEMENT_ERROR_BUF_SIZE;
}
Key_t* pItem;
SecureElementStatus_t retval = GetKeyByID( keyID, &pItem );
if( retval == SECURE_ELEMENT_SUCCESS )
{
uint8_t block = 0;
while( size != 0 )
{
atcab_aes_encrypt( pItem->KeySlotNumber, pItem->KeyBlockIndex, &buffer[block], &encBuffer[block] );
block = block + 16;
size = size - 16;
}
}
return retval;
}
SecureElementStatus_t SecureElementDeriveAndStoreKey( Version_t version, uint8_t* input, KeyIdentifier_t rootKeyID,
KeyIdentifier_t targetKeyID )
{
if( input == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Source key slot is the LSB and target key slot is the MSB
uint16_t source_target_ids = 0;
Key_t* source_key;
Key_t* target_key;
ATCA_STATUS status = ATCA_SUCCESS;
// In case of MC_KE_KEY, only McRootKey can be used as root key
if( targetKeyID == MC_KE_KEY )
{
if( rootKeyID != MC_ROOT_KEY )
{
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
}
if( ( rootKeyID == APP_KEY ) || ( rootKeyID == MC_ROOT_KEY ) || ( rootKeyID == MC_KE_KEY ) )
{
// Allow the stack to move forward as these rootkeys dont exist inside SE.
return SECURE_ELEMENT_SUCCESS;
}
if( GetKeyByID( rootKeyID, &source_key ) != SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
if( GetKeyByID( targetKeyID, &target_key ) != SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
source_target_ids = target_key->KeySlotNumber << 8;
source_target_ids += source_key->KeySlotNumber;
uint32_t detail = source_key->KeyBlockIndex;
status = atcab_kdf( KDF_MODE_ALG_AES | KDF_MODE_SOURCE_SLOT | KDF_MODE_TARGET_SLOT, source_target_ids, detail,
input, NULL, NULL );
if( status == ATCA_SUCCESS )
{
return SECURE_ELEMENT_SUCCESS;
}
else
{
return SECURE_ELEMENT_ERROR;
}
}
SecureElementStatus_t SecureElementProcessJoinAccept( JoinReqIdentifier_t joinReqType, uint8_t* joinEui,
uint16_t devNonce, uint8_t* encJoinAccept,
uint8_t encJoinAcceptSize, uint8_t* decJoinAccept,
uint8_t* versionMinor )
{
if( ( encJoinAccept == NULL ) || ( decJoinAccept == NULL ) || ( versionMinor == NULL ) )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Determine decryption key
KeyIdentifier_t encKeyID = NWK_KEY;
if( joinReqType != JOIN_REQ )
{
encKeyID = J_S_ENC_KEY;
}
memcpy1( decJoinAccept, encJoinAccept, encJoinAcceptSize );
// Decrypt JoinAccept, skip MHDR
if( SecureElementAesEncrypt( encJoinAccept + LORAMAC_MHDR_FIELD_SIZE, encJoinAcceptSize - LORAMAC_MHDR_FIELD_SIZE,
encKeyID, decJoinAccept + LORAMAC_MHDR_FIELD_SIZE ) != SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_FAIL_ENCRYPT;
}
*versionMinor = ( ( decJoinAccept[11] & 0x80 ) == 0x80 ) ? 1 : 0;
uint32_t mic = 0;
mic = ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE] << 0 );
mic |= ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE + 1] << 8 );
mic |= ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE + 2] << 16 );
mic |= ( ( uint32_t ) decJoinAccept[encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE + 3] << 24 );
// - Header buffer to be used for MIC computation
// - LoRaWAN 1.0.x : micHeader = [MHDR(1)]
// - LoRaWAN 1.1.x : micHeader = [JoinReqType(1), JoinEUI(8), DevNonce(2), MHDR(1)]
// Verify mic
if( *versionMinor == 0 )
{
// For LoRaWAN 1.0.x
// cmac = aes128_cmac(NwkKey, MHDR | JoinNonce | NetID | DevAddr | DLSettings | RxDelay | CFList |
// CFListType)
if( SecureElementVerifyAesCmac( decJoinAccept, ( encJoinAcceptSize - LORAMAC_MIC_FIELD_SIZE ), mic, NWK_KEY ) !=
SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_FAIL_CMAC;
}
}
#if( USE_LRWAN_1_1_X_CRYPTO == 1 )
else if( *versionMinor == 1 )
{
uint8_t micHeader11[JOIN_ACCEPT_MIC_COMPUTATION_OFFSET] = { 0 };
uint16_t bufItr = 0;
micHeader11[bufItr++] = ( uint8_t ) joinReqType;
memcpyr( micHeader11 + bufItr, joinEui, LORAMAC_JOIN_EUI_FIELD_SIZE );
bufItr += LORAMAC_JOIN_EUI_FIELD_SIZE;
micHeader11[bufItr++] = devNonce & 0xFF;
micHeader11[bufItr++] = ( devNonce >> 8 ) & 0xFF;
// For LoRaWAN 1.1.x and later:
// cmac = aes128_cmac(JSIntKey, JoinReqType | JoinEUI | DevNonce | MHDR | JoinNonce | NetID | DevAddr |
// DLSettings | RxDelay | CFList | CFListType)
// Prepare the msg for integrity check (adding JoinReqType, JoinEUI and DevNonce)
uint8_t localBuffer[LORAMAC_JOIN_ACCEPT_FRAME_MAX_SIZE + JOIN_ACCEPT_MIC_COMPUTATION_OFFSET] = { 0 };
memcpy1( localBuffer, micHeader11, JOIN_ACCEPT_MIC_COMPUTATION_OFFSET );
memcpy1( localBuffer + JOIN_ACCEPT_MIC_COMPUTATION_OFFSET - 1, decJoinAccept, encJoinAcceptSize );
if( SecureElementVerifyAesCmac( localBuffer,
encJoinAcceptSize + JOIN_ACCEPT_MIC_COMPUTATION_OFFSET -
LORAMAC_MHDR_FIELD_SIZE - LORAMAC_MIC_FIELD_SIZE,
mic, J_S_INT_KEY ) != SECURE_ELEMENT_SUCCESS )
{
return SECURE_ELEMENT_FAIL_CMAC;
}
}
#endif
else
{
return SECURE_ELEMENT_ERROR_INVALID_LORAWAM_SPEC_VERSION;
}
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementRandomNumber( uint32_t* randomNum )
{
if( randomNum == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
*randomNum = ATECC608ASeHalGetRandomNumber( );
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementSetDevEui( uint8_t* devEui )
{
if( devEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.DevEui, devEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetDevEui( void )
{
return SeNvmCtx.DevEui;
}
SecureElementStatus_t SecureElementSetJoinEui( uint8_t* joinEui )
{
if( joinEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.JoinEui, joinEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetJoinEui( void )
{
return SeNvmCtx.JoinEui;
}
SecureElementStatus_t SecureElementSetPin( uint8_t* pin )
{
if( pin == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.Pin, pin, SE_PIN_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetPin( void )
{
return SeNvmCtx.Pin;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3927_1 |
crossvul-cpp_data_good_2394_0 | /*****************************************************************************
* update.c: VLC update checking and downloading
*****************************************************************************
* Copyright © 2005-2008 VLC authors and VideoLAN
* $Id$
*
* Authors: Antoine Cellerier <dionoea -at- videolan -dot- org>
* Rémi Duraffort <ivoire at via.ecp.fr>
Rafaël Carré <funman@videolanorg>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either release 2 of the License, or
* (at your option) any later release.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/**
* \file
* This file contains functions related to VLC update management
*/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <vlc_common.h>
#include <vlc_update.h>
#ifdef UPDATE_CHECK
#include <assert.h>
#include <vlc_pgpkey.h>
#include <vlc_stream.h>
#include <vlc_strings.h>
#include <vlc_fs.h>
#include <vlc_dialog.h>
#include <vlc_interface.h>
#include <gcrypt.h>
#include <vlc_gcrypt.h>
#ifdef _WIN32
#include <shellapi.h>
#endif
#include "update.h"
#include "../libvlc.h"
/*****************************************************************************
* Misc defines
*****************************************************************************/
/*
* Here is the format of these "status files" :
* First line is the last version: "X.Y.Z.E" where:
* * X is the major number
* * Y is the minor number
* * Z is the revision number
* * .E is an OPTIONAL extra number
* * IE "1.2.0" or "1.1.10.1"
* Second line is a url of the binary for this last version
* Remaining text is a required description of the update
*/
#if defined( _WIN64 )
# define UPDATE_OS_SUFFIX "-win-x64"
#elif defined( _WIN32 )
# define UPDATE_OS_SUFFIX "-win-x86"
#else
# define UPDATE_OS_SUFFIX ""
#endif
#ifndef NDEBUG
# define UPDATE_VLC_STATUS_URL "http://update-test.videolan.org/vlc/status-win-x86"
#else
# define UPDATE_VLC_STATUS_URL "http://update.videolan.org/vlc/status" UPDATE_OS_SUFFIX
#endif
/*****************************************************************************
* Update_t functions
*****************************************************************************/
#undef update_New
/**
* Create a new update VLC struct
*
* \param p_this the calling vlc_object
* \return pointer to new update_t or NULL
*/
update_t *update_New( vlc_object_t *p_this )
{
update_t *p_update;
assert( p_this );
p_update = (update_t *)malloc( sizeof( update_t ) );
if( !p_update ) return NULL;
vlc_mutex_init( &p_update->lock );
p_update->p_libvlc = p_this->p_libvlc;
p_update->release.psz_url = NULL;
p_update->release.psz_desc = NULL;
p_update->p_download = NULL;
p_update->p_check = NULL;
p_update->p_pkey = NULL;
vlc_gcrypt_init();
return p_update;
}
/**
* Delete an update_t struct
*
* \param p_update update_t* pointer
* \return nothing
*/
void update_Delete( update_t *p_update )
{
assert( p_update );
if( p_update->p_check )
{
vlc_join( p_update->p_check->thread, NULL );
free( p_update->p_check );
}
if( p_update->p_download )
{
atomic_store( &p_update->p_download->aborted, true );
vlc_join( p_update->p_download->thread, NULL );
vlc_object_release( p_update->p_download );
}
vlc_mutex_destroy( &p_update->lock );
free( p_update->release.psz_url );
free( p_update->release.psz_desc );
free( p_update->p_pkey );
free( p_update );
}
/**
* Empty the release struct
*
* \param p_update update_t* pointer
* \return nothing
*/
static void EmptyRelease( update_t *p_update )
{
p_update->release.i_major = 0;
p_update->release.i_minor = 0;
p_update->release.i_revision = 0;
FREENULL( p_update->release.psz_url );
FREENULL( p_update->release.psz_desc );
}
/**
* Get the update file and parse it
* p_update has to be locked when calling this function
*
* \param p_update pointer to update struct
* \return true if the update is valid and authenticated
*/
static bool GetUpdateFile( update_t *p_update )
{
stream_t *p_stream = NULL;
char *psz_version_line = NULL;
char *psz_update_data = NULL;
p_stream = stream_UrlNew( p_update->p_libvlc, UPDATE_VLC_STATUS_URL );
if( !p_stream )
{
msg_Err( p_update->p_libvlc, "Failed to open %s for reading",
UPDATE_VLC_STATUS_URL );
goto error;
}
const int64_t i_read = stream_Size( p_stream );
if( i_read < 0 || i_read >= UINT16_MAX)
{
msg_Err(p_update->p_libvlc, "Status file too large");
goto error;
}
psz_update_data = malloc( i_read + 1 ); /* terminating '\0' */
if( !psz_update_data )
goto error;
if( stream_Read( p_stream, psz_update_data, i_read ) != i_read )
{
msg_Err( p_update->p_libvlc, "Couldn't download update file %s",
UPDATE_VLC_STATUS_URL );
goto error;
}
psz_update_data[i_read] = '\0';
stream_Delete( p_stream );
p_stream = NULL;
/* first line : version number */
char *psz_update_data_parser = psz_update_data;
size_t i_len = strcspn( psz_update_data, "\r\n" );
psz_update_data_parser += i_len;
while( *psz_update_data_parser == '\r' || *psz_update_data_parser == '\n' )
psz_update_data_parser++;
if( !(psz_version_line = malloc( i_len + 1)) )
goto error;
strncpy( psz_version_line, psz_update_data, i_len );
psz_version_line[i_len] = '\0';
p_update->release.i_extra = 0;
int ret = sscanf( psz_version_line, "%i.%i.%i.%i",
&p_update->release.i_major, &p_update->release.i_minor,
&p_update->release.i_revision, &p_update->release.i_extra);
if( ret != 3 && ret != 4 )
{
msg_Err( p_update->p_libvlc, "Update version false formated" );
goto error;
}
/* second line : URL */
i_len = strcspn( psz_update_data_parser, "\r\n" );
if( i_len == 0 )
{
msg_Err( p_update->p_libvlc, "Update file %s is corrupted: URL missing",
UPDATE_VLC_STATUS_URL );
goto error;
}
if( !(p_update->release.psz_url = malloc( i_len + 1)) )
goto error;
strncpy( p_update->release.psz_url, psz_update_data_parser, i_len );
p_update->release.psz_url[i_len] = '\0';
psz_update_data_parser += i_len;
while( *psz_update_data_parser == '\r' || *psz_update_data_parser == '\n' )
psz_update_data_parser++;
/* Remaining data : description */
i_len = strlen( psz_update_data_parser );
if( i_len == 0 )
{
msg_Err( p_update->p_libvlc,
"Update file %s is corrupted: description missing",
UPDATE_VLC_STATUS_URL );
goto error;
}
if( !(p_update->release.psz_desc = malloc( i_len + 1)) )
goto error;
strncpy( p_update->release.psz_desc, psz_update_data_parser, i_len );
p_update->release.psz_desc[i_len] = '\0';
/* Now that we know the status is valid, we must download its signature
* to authenticate it */
signature_packet_t sign;
if( download_signature( VLC_OBJECT( p_update->p_libvlc ), &sign,
UPDATE_VLC_STATUS_URL ) != VLC_SUCCESS )
{
msg_Err( p_update->p_libvlc, "Couldn't download signature of status file" );
goto error;
}
if( sign.type != BINARY_SIGNATURE && sign.type != TEXT_SIGNATURE )
{
msg_Err( p_update->p_libvlc, "Invalid signature type" );
goto error;
}
p_update->p_pkey = (public_key_t*)malloc( sizeof( public_key_t ) );
if( !p_update->p_pkey )
goto error;
if( parse_public_key( videolan_public_key, sizeof( videolan_public_key ),
p_update->p_pkey, NULL ) != VLC_SUCCESS )
{
msg_Err( p_update->p_libvlc, "Couldn't parse embedded public key, something went really wrong..." );
FREENULL( p_update->p_pkey );
goto error;
}
memcpy( p_update->p_pkey->longid, videolan_public_key_longid, 8 );
if( memcmp( sign.issuer_longid, p_update->p_pkey->longid , 8 ) != 0 )
{
msg_Dbg( p_update->p_libvlc, "Need to download the GPG key" );
public_key_t *p_new_pkey = download_key(
VLC_OBJECT(p_update->p_libvlc),
sign.issuer_longid, videolan_public_key_longid );
if( !p_new_pkey )
{
msg_Err( p_update->p_libvlc, "Couldn't download GPG key" );
FREENULL( p_update->p_pkey );
goto error;
}
uint8_t *p_hash = hash_from_public_key( p_new_pkey );
if( !p_hash )
{
msg_Err( p_update->p_libvlc, "Failed to hash signature" );
free( p_new_pkey );
FREENULL( p_update->p_pkey );
goto error;
}
if( verify_signature( &p_new_pkey->sig,
&p_update->p_pkey->key, p_hash ) == VLC_SUCCESS )
{
free( p_hash );
msg_Info( p_update->p_libvlc, "Key authenticated" );
free( p_update->p_pkey );
p_update->p_pkey = p_new_pkey;
}
else
{
free( p_hash );
msg_Err( p_update->p_libvlc, "Key signature invalid !" );
goto error;
}
}
uint8_t *p_hash = hash_from_text( psz_update_data, &sign );
if( !p_hash )
{
msg_Warn( p_update->p_libvlc, "Can't compute hash for status file" );
goto error;
}
else if( p_hash[0] != sign.hash_verification[0] ||
p_hash[1] != sign.hash_verification[1] )
{
msg_Warn( p_update->p_libvlc, "Bad hash for status file" );
free( p_hash );
goto error;
}
else if( verify_signature( &sign, &p_update->p_pkey->key, p_hash )
!= VLC_SUCCESS )
{
msg_Err( p_update->p_libvlc, "BAD SIGNATURE for status file" );
free( p_hash );
goto error;
}
else
{
msg_Info( p_update->p_libvlc, "Status file authenticated" );
free( p_hash );
free( psz_version_line );
free( psz_update_data );
return true;
}
error:
if( p_stream )
stream_Delete( p_stream );
free( psz_version_line );
free( psz_update_data );
return false;
}
static void* update_CheckReal( void * );
/**
* Check for updates
*
* \param p_update pointer to update struct
* \param pf_callback pointer to a function to call when the update_check is finished
* \param p_data pointer to some datas to give to the callback
* \returns nothing
*/
void update_Check( update_t *p_update, void (*pf_callback)( void*, bool ), void *p_data )
{
assert( p_update );
// If the object already exist, destroy it
if( p_update->p_check )
{
vlc_join( p_update->p_check->thread, NULL );
free( p_update->p_check );
}
update_check_thread_t *p_uct = calloc( 1, sizeof( *p_uct ) );
if( !p_uct ) return;
p_uct->p_update = p_update;
p_update->p_check = p_uct;
p_uct->pf_callback = pf_callback;
p_uct->p_data = p_data;
vlc_clone( &p_uct->thread, update_CheckReal, p_uct, VLC_THREAD_PRIORITY_LOW );
}
void* update_CheckReal( void *obj )
{
update_check_thread_t *p_uct = (update_check_thread_t *)obj;
bool b_ret;
int canc;
canc = vlc_savecancel ();
vlc_mutex_lock( &p_uct->p_update->lock );
EmptyRelease( p_uct->p_update );
b_ret = GetUpdateFile( p_uct->p_update );
vlc_mutex_unlock( &p_uct->p_update->lock );
if( p_uct->pf_callback )
(p_uct->pf_callback)( p_uct->p_data, b_ret );
vlc_restorecancel (canc);
return NULL;
}
bool update_NeedUpgrade( update_t *p_update )
{
assert( p_update );
static const int current[4] = {
PACKAGE_VERSION_MAJOR,
PACKAGE_VERSION_MINOR,
PACKAGE_VERSION_REVISION,
PACKAGE_VERSION_EXTRA
};
const int latest[4] = {
p_update->release.i_major,
p_update->release.i_minor,
p_update->release.i_revision,
p_update->release.i_extra
};
for (unsigned i = 0; i < sizeof latest / sizeof *latest; i++) {
/* there is a new version available */
if (latest[i] > current[i])
return true;
/* current version is more recent than the latest version ?! */
if (latest[i] < current[i])
return false;
}
/* current version is not a release, it's a -git or -rc version */
if (*PACKAGE_VERSION_DEV)
return true;
/* current version is latest version */
return false;
}
/**
* Convert a long int size in bytes to a string
*
* \param l_size the size in bytes
* \return the size as a string
*/
static char *size_str( long int l_size )
{
char *psz_tmp = NULL;
int i_retval = 0;
if( l_size >> 30 )
i_retval = asprintf( &psz_tmp, _("%.1f GiB"), (float)l_size/(1<<30) );
else if( l_size >> 20 )
i_retval = asprintf( &psz_tmp, _("%.1f MiB"), (float)l_size/(1<<20) );
else if( l_size >> 10 )
i_retval = asprintf( &psz_tmp, _("%.1f KiB"), (float)l_size/(1<<10) );
else
i_retval = asprintf( &psz_tmp, _("%ld B"), l_size );
return i_retval == -1 ? NULL : psz_tmp;
}
static void* update_DownloadReal( void * );
/**
* Download the file given in the update_t
*
* \param p_update structure
* \param dir to store the download file
* \return nothing
*/
void update_Download( update_t *p_update, const char *psz_destdir )
{
assert( p_update );
// If the object already exist, destroy it
if( p_update->p_download )
{
atomic_store( &p_update->p_download->aborted, true );
vlc_join( p_update->p_download->thread, NULL );
vlc_object_release( p_update->p_download );
}
update_download_thread_t *p_udt =
vlc_custom_create( p_update->p_libvlc, sizeof( *p_udt ),
"update download" );
if( !p_udt )
return;
p_udt->p_update = p_update;
p_update->p_download = p_udt;
p_udt->psz_destdir = psz_destdir ? strdup( psz_destdir ) : NULL;
atomic_store(&p_udt->aborted, false);
vlc_clone( &p_udt->thread, update_DownloadReal, p_udt, VLC_THREAD_PRIORITY_LOW );
}
static void* update_DownloadReal( void *obj )
{
update_download_thread_t *p_udt = (update_download_thread_t *)obj;
dialog_progress_bar_t *p_progress = NULL;
long int l_size;
long int l_downloaded = 0;
float f_progress;
char *psz_status;
char *psz_downloaded = NULL;
char *psz_size = NULL;
char *psz_destfile = NULL;
char *psz_tmpdestfile = NULL;
FILE *p_file = NULL;
stream_t *p_stream = NULL;
void* p_buffer = NULL;
int i_read;
int canc;
update_t *p_update = p_udt->p_update;
char *psz_destdir = p_udt->psz_destdir;
msg_Dbg( p_udt, "Opening Stream '%s'", p_update->release.psz_url );
canc = vlc_savecancel ();
/* Open the stream */
p_stream = stream_UrlNew( p_udt, p_update->release.psz_url );
if( !p_stream )
{
msg_Err( p_udt, "Failed to open %s for reading", p_update->release.psz_url );
goto end;
}
/* Get the stream size */
l_size = stream_Size( p_stream );
/* Get the file name and open it*/
psz_tmpdestfile = strrchr( p_update->release.psz_url, '/' );
if( !psz_tmpdestfile )
{
msg_Err( p_udt, "The URL %s is badly formated",
p_update->release.psz_url );
goto end;
}
psz_tmpdestfile++;
if( asprintf( &psz_destfile, "%s%s", psz_destdir, psz_tmpdestfile ) == -1 )
goto end;
p_file = vlc_fopen( psz_destfile, "w" );
if( !p_file )
{
msg_Err( p_udt, "Failed to open %s for writing", psz_destfile );
dialog_FatalWait( p_udt, _("Saving file failed"),
_("Failed to open \"%s\" for writing"),
psz_destfile );
goto end;
}
/* Create a buffer and fill it with the downloaded file */
p_buffer = (void *)malloc( 1 << 10 );
if( unlikely(p_buffer == NULL) )
goto end;
msg_Dbg( p_udt, "Downloading Stream '%s'", p_update->release.psz_url );
psz_size = size_str( l_size );
if( asprintf( &psz_status, _("%s\nDownloading... %s/%s %.1f%% done"),
p_update->release.psz_url, "0.0", psz_size, 0.0 ) == -1 )
goto end;
p_progress = dialog_ProgressCreate( p_udt, _( "Downloading ..."),
psz_status, _("Cancel") );
free( psz_status );
if( p_progress == NULL )
goto end;
while( !atomic_load( &p_udt->aborted ) &&
( i_read = stream_Read( p_stream, p_buffer, 1 << 10 ) ) &&
!dialog_ProgressCancelled( p_progress ) )
{
if( fwrite( p_buffer, i_read, 1, p_file ) < 1 )
{
msg_Err( p_udt, "Failed to write into %s", psz_destfile );
break;
}
l_downloaded += i_read;
psz_downloaded = size_str( l_downloaded );
f_progress = (float)l_downloaded/(float)l_size;
if( asprintf( &psz_status, _( "%s\nDownloading... %s/%s - %.1f%% done" ),
p_update->release.psz_url, psz_downloaded, psz_size,
f_progress*100 ) != -1 )
{
dialog_ProgressSet( p_progress, psz_status, f_progress );
free( psz_status );
}
free( psz_downloaded );
}
/* Finish the progress bar or delete the file if the user had canceled */
fclose( p_file );
p_file = NULL;
if( !atomic_load( &p_udt->aborted ) &&
!dialog_ProgressCancelled( p_progress ) )
{
dialog_ProgressDestroy( p_progress );
p_progress = NULL;
}
else
{
vlc_unlink( psz_destfile );
goto end;
}
signature_packet_t sign;
if( download_signature( VLC_OBJECT( p_udt ), &sign,
p_update->release.psz_url ) != VLC_SUCCESS )
{
vlc_unlink( psz_destfile );
dialog_FatalWait( p_udt, _("File could not be verified"),
_("It was not possible to download a cryptographic signature for "
"the downloaded file \"%s\". Thus, it was deleted."),
psz_destfile );
msg_Err( p_udt, "Couldn't download signature of downloaded file" );
goto end;
}
if( memcmp( sign.issuer_longid, p_update->p_pkey->longid, 8 ) )
{
vlc_unlink( psz_destfile );
msg_Err( p_udt, "Invalid signature issuer" );
dialog_FatalWait( p_udt, _("Invalid signature"),
_("The cryptographic signature for the downloaded file \"%s\" was "
"invalid and could not be used to securely verify it. Thus, the "
"file was deleted."),
psz_destfile );
goto end;
}
if( sign.type != BINARY_SIGNATURE )
{
vlc_unlink( psz_destfile );
msg_Err( p_udt, "Invalid signature type" );
dialog_FatalWait( p_udt, _("Invalid signature"),
_("The cryptographic signature for the downloaded file \"%s\" was "
"invalid and could not be used to securely verify it. Thus, the "
"file was deleted."),
psz_destfile );
goto end;
}
uint8_t *p_hash = hash_from_file( psz_destfile, &sign );
if( !p_hash )
{
msg_Err( p_udt, "Unable to hash %s", psz_destfile );
vlc_unlink( psz_destfile );
dialog_FatalWait( p_udt, _("File not verifiable"),
_("It was not possible to securely verify the downloaded file"
" \"%s\". Thus, it was deleted."),
psz_destfile );
goto end;
}
if( p_hash[0] != sign.hash_verification[0] ||
p_hash[1] != sign.hash_verification[1] )
{
vlc_unlink( psz_destfile );
dialog_FatalWait( p_udt, _("File corrupted"),
_("Downloaded file \"%s\" was corrupted. Thus, it was deleted."),
psz_destfile );
msg_Err( p_udt, "Bad hash for %s", psz_destfile );
free( p_hash );
goto end;
}
if( verify_signature( &sign, &p_update->p_pkey->key, p_hash )
!= VLC_SUCCESS )
{
vlc_unlink( psz_destfile );
dialog_FatalWait( p_udt, _("File corrupted"),
_("Downloaded file \"%s\" was corrupted. Thus, it was deleted."),
psz_destfile );
msg_Err( p_udt, "BAD SIGNATURE for %s", psz_destfile );
free( p_hash );
goto end;
}
msg_Info( p_udt, "%s authenticated", psz_destfile );
free( p_hash );
#ifdef _WIN32
int answer = dialog_Question( p_udt, _("Update VLC media player"),
_("The new version was successfully downloaded. Do you want to close VLC and install it now?"),
_("Install"), _("Cancel"), NULL);
if(answer == 1)
{
wchar_t psz_wdestfile[MAX_PATH];
MultiByteToWideChar( CP_UTF8, 0, psz_destfile, -1, psz_wdestfile, MAX_PATH );
answer = (int)ShellExecuteW( NULL, L"open", psz_wdestfile, NULL, NULL, SW_SHOW);
if(answer > 32)
libvlc_Quit(p_udt->p_libvlc);
}
#endif
end:
if( p_progress )
dialog_ProgressDestroy( p_progress );
if( p_stream )
stream_Delete( p_stream );
if( p_file )
fclose( p_file );
free( psz_destdir );
free( psz_destfile );
free( p_buffer );
free( psz_size );
vlc_restorecancel( canc );
return NULL;
}
update_release_t *update_GetRelease( update_t *p_update )
{
return &p_update->release;
}
#else
#undef update_New
update_t *update_New( vlc_object_t *p_this )
{
(void)p_this;
return NULL;
}
void update_Delete( update_t *p_update )
{
(void)p_update;
}
void update_Check( update_t *p_update, void (*pf_callback)( void*, bool ),
void *p_data )
{
(void)p_update; (void)pf_callback; (void)p_data;
}
bool update_NeedUpgrade( update_t *p_update )
{
(void)p_update;
return false;
}
void update_Download( update_t *p_update, const char *psz_destdir )
{
(void)p_update; (void)psz_destdir;
}
update_release_t *update_GetRelease( update_t *p_update )
{
(void)p_update;
return NULL;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_2394_0 |
crossvul-cpp_data_good_4711_1 | /*
ettercap -- GTK+ GUI
Copyright (C) ALoR & NaGA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
$Id: ec_gtk_mitm.c,v 1.2 2004/02/27 20:03:40 daten Exp $
*/
#include <ec.h>
#include <ec_gtk.h>
#include <ec_mitm.h>
/* proto */
void gtkui_arp_poisoning(void);
void gtkui_icmp_redir(void);
void gtkui_port_stealing(void);
void gtkui_dhcp_spoofing(void);
void gtkui_mitm_stop(void);
static void gtkui_start_mitm(void);
/* globals */
#define PARAMS_LEN 512
static char params[PARAMS_LEN+1];
/*******************************************/
void gtkui_arp_poisoning(void)
{
GtkWidget *dialog, *vbox, *hbox, *image, *button1, *button2, *frame;
gint response = 0;
gboolean remote = FALSE;
gboolean oneway = FALSE;
DEBUG_MSG("gtk_arp_poisoning");
memset(params, '\0', PARAMS_LEN+1);
dialog = gtk_dialog_new_with_buttons("MITM Attack: ARP Poisoning", GTK_WINDOW (window),
GTK_DIALOG_MODAL, GTK_STOCK_OK, GTK_RESPONSE_OK,
GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL, NULL);
gtk_container_set_border_width(GTK_CONTAINER (dialog), 5);
gtk_dialog_set_has_separator(GTK_DIALOG (dialog), FALSE);
hbox = gtk_hbox_new (FALSE, 5);
gtk_box_pack_start (GTK_BOX (GTK_DIALOG (dialog)->vbox), hbox, FALSE, FALSE, 0);
gtk_widget_show(hbox);
image = gtk_image_new_from_stock (GTK_STOCK_DIALOG_QUESTION, GTK_ICON_SIZE_DIALOG);
gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0.1);
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 5);
gtk_widget_show(image);
frame = gtk_frame_new("Optional parameters");
gtk_container_set_border_width(GTK_CONTAINER (frame), 5);
gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
gtk_widget_show(frame);
vbox = gtk_vbox_new (FALSE, 2);
gtk_container_set_border_width(GTK_CONTAINER (vbox), 5);
gtk_container_add(GTK_CONTAINER (frame), vbox);
gtk_widget_show(vbox);
button1 = gtk_check_button_new_with_label("Sniff remote connections.");
gtk_box_pack_start(GTK_BOX (vbox), button1, FALSE, FALSE, 0);
gtk_widget_show(button1);
button2 = gtk_check_button_new_with_label("Only poison one-way.");
gtk_box_pack_start(GTK_BOX (vbox), button2, FALSE, FALSE, 0);
gtk_widget_show(button2);
response = gtk_dialog_run(GTK_DIALOG(dialog));
if(response == GTK_RESPONSE_OK) {
gtk_widget_hide(dialog);
memset(params, '\0', PARAMS_LEN+1);
snprintf(params, 5, "arp:");
if(gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON (button1))) {
strcat(params, "remote");
remote = TRUE;
}
if(gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON (button2))) {
if(remote)
strcat(params, ",");
strcat(params, "oneway");
oneway = TRUE;
}
if(!remote && !oneway) {
ui_error("You must select at least one ARP mode");
return;
}
gtkui_start_mitm();
}
gtk_widget_destroy(dialog);
/* a simpler method:
gtkui_input_call("Parameters :", params + strlen("arp:"), PARAMS_LEN - strlen("arp:"), gtkui_start_mitm);
*/
}
void gtkui_icmp_redir(void)
{
GtkWidget *dialog, *table, *hbox, *image, *label, *entry1, *entry2, *frame;
gint response = 0;
DEBUG_MSG("gtk_icmp_redir");
dialog = gtk_dialog_new_with_buttons("MITM Attack: ICMP Redirect", GTK_WINDOW (window),
GTK_DIALOG_MODAL, GTK_STOCK_OK, GTK_RESPONSE_OK,
GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL, NULL);
gtk_container_set_border_width(GTK_CONTAINER (dialog), 5);
gtk_dialog_set_has_separator(GTK_DIALOG (dialog), FALSE);
hbox = gtk_hbox_new (FALSE, 5);
gtk_box_pack_start (GTK_BOX (GTK_DIALOG (dialog)->vbox), hbox, FALSE, FALSE, 0);
gtk_widget_show(hbox);
image = gtk_image_new_from_stock (GTK_STOCK_DIALOG_QUESTION, GTK_ICON_SIZE_DIALOG);
gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0.1);
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 5);
gtk_widget_show(image);
frame = gtk_frame_new("Gateway Information");
gtk_container_set_border_width(GTK_CONTAINER (frame), 5);
gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
gtk_widget_show(frame);
table = gtk_table_new(2, 2, FALSE);
gtk_table_set_row_spacings(GTK_TABLE (table), 5);
gtk_table_set_col_spacings(GTK_TABLE (table), 5);
gtk_container_set_border_width(GTK_CONTAINER (table), 8);
gtk_container_add(GTK_CONTAINER (frame), table);
gtk_widget_show(table);
label = gtk_label_new("MAC Address");
gtk_misc_set_alignment(GTK_MISC (label), 0, 0.5);
gtk_table_attach(GTK_TABLE (table), label, 0, 1, 0, 1, GTK_FILL, GTK_FILL, 0, 0);
gtk_widget_show(label);
entry1 = gtk_entry_new();
gtk_entry_set_max_length(GTK_ENTRY (entry1), ETH_ASCII_ADDR_LEN);
gtk_table_attach_defaults(GTK_TABLE (table), entry1, 1, 2, 0, 1);
gtk_widget_show(entry1);
label = gtk_label_new("IP Address");
gtk_misc_set_alignment(GTK_MISC (label), 0, 0.5);
gtk_table_attach(GTK_TABLE (table), label, 0, 1, 1, 2, GTK_FILL, GTK_FILL, 0, 0);
gtk_widget_show(label);
entry2 = gtk_entry_new();
gtk_entry_set_max_length(GTK_ENTRY (entry2), IP6_ASCII_ADDR_LEN);
gtk_table_attach_defaults(GTK_TABLE (table), entry2, 1, 2, 1, 2);
gtk_widget_show(entry2);
response = gtk_dialog_run(GTK_DIALOG(dialog));
if(response == GTK_RESPONSE_OK) {
gtk_widget_hide(dialog);
snprintf(params, 6, "icmp:");
strncat(params, gtk_entry_get_text(GTK_ENTRY(entry1)), ETH_ASCII_ADDR_LEN);
strncat(params, "/", 1);
strncat(params, gtk_entry_get_text(GTK_ENTRY(entry2)), IP6_ASCII_ADDR_LEN);
gtkui_start_mitm();
}
gtk_widget_destroy(dialog);
/* a simpler method:
gtkui_input_call("Parameters :", params + strlen("icmp:"), PARAMS_LEN - strlen("icmp:"), gtkui_start_mitm);
*/
}
void gtkui_port_stealing(void)
{
GtkWidget *dialog, *vbox, *hbox, *image, *button1, *button2, *frame;
gint response = 0;
gboolean remote = FALSE;
DEBUG_MSG("gtk_port_stealing");
dialog = gtk_dialog_new_with_buttons("MITM Attack: Port Stealing", GTK_WINDOW (window),
GTK_DIALOG_MODAL, GTK_STOCK_OK, GTK_RESPONSE_OK,
GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL, NULL);
gtk_container_set_border_width(GTK_CONTAINER (dialog), 5);
gtk_dialog_set_has_separator(GTK_DIALOG (dialog), FALSE);
hbox = gtk_hbox_new (FALSE, 5);
gtk_box_pack_start (GTK_BOX (GTK_DIALOG (dialog)->vbox), hbox, FALSE, FALSE, 0);
gtk_widget_show(hbox);
image = gtk_image_new_from_stock (GTK_STOCK_DIALOG_QUESTION, GTK_ICON_SIZE_DIALOG);
gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0.1);
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 5);
gtk_widget_show(image);
frame = gtk_frame_new("Optional parameters");
gtk_container_set_border_width(GTK_CONTAINER (frame), 5);
gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
gtk_widget_show(frame);
vbox = gtk_vbox_new (FALSE, 2);
gtk_container_set_border_width(GTK_CONTAINER (vbox), 5);
gtk_container_add(GTK_CONTAINER (frame), vbox);
gtk_widget_show(vbox);
button1 = gtk_check_button_new_with_label("Sniff remote connections.");
gtk_box_pack_start(GTK_BOX (vbox), button1, FALSE, FALSE, 0);
gtk_widget_show(button1);
button2 = gtk_check_button_new_with_label("Propagate to other switches.");
gtk_box_pack_start(GTK_BOX (vbox), button2, FALSE, FALSE, 0);
gtk_widget_show(button2);
response = gtk_dialog_run(GTK_DIALOG(dialog));
if(response == GTK_RESPONSE_OK) {
gtk_widget_hide(dialog);
snprintf(params, 6, "port:");
if(gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON (button1))) {
strcat(params, "remote");
remote = TRUE;
}
if(gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON (button2))) {
if(remote)
strcat(params, ",");
strcat(params, "tree");
}
gtkui_start_mitm();
}
gtk_widget_destroy(dialog);
/* a simpler method:
gtkui_input_call("Parameters :", params + strlen("port:"), PARAMS_LEN - strlen("port:"), gtkui_start_mitm);
*/
}
void gtkui_dhcp_spoofing(void)
{
GtkWidget *dialog, *table, *hbox, *image, *label, *entry1, *entry2, *entry3, *frame;
gint response = 0;
DEBUG_MSG("gtk_dhcp_spoofing");
memset(params, '\0', PARAMS_LEN+1);
dialog = gtk_dialog_new_with_buttons("MITM Attack: DHCP Spoofing", GTK_WINDOW (window),
GTK_DIALOG_MODAL, GTK_STOCK_OK, GTK_RESPONSE_OK,
GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL, NULL);
gtk_container_set_border_width(GTK_CONTAINER (dialog), 5);
gtk_dialog_set_has_separator(GTK_DIALOG (dialog), FALSE);
hbox = gtk_hbox_new (FALSE, 5);
gtk_box_pack_start (GTK_BOX (GTK_DIALOG (dialog)->vbox), hbox, FALSE, FALSE, 0);
gtk_widget_show(hbox);
image = gtk_image_new_from_stock (GTK_STOCK_DIALOG_QUESTION, GTK_ICON_SIZE_DIALOG);
gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0.1);
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 5);
gtk_widget_show(image);
frame = gtk_frame_new("Server Information");
gtk_container_set_border_width(GTK_CONTAINER (frame), 5);
gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
gtk_widget_show(frame);
table = gtk_table_new(3, 2, FALSE);
gtk_table_set_row_spacings(GTK_TABLE (table), 5);
gtk_table_set_col_spacings(GTK_TABLE (table), 5);
gtk_container_set_border_width(GTK_CONTAINER (table), 8);
gtk_container_add(GTK_CONTAINER (frame), table);
gtk_widget_show(table);
label = gtk_label_new("IP Pool (optional)");
gtk_misc_set_alignment(GTK_MISC (label), 0, 0.5);
gtk_table_attach(GTK_TABLE (table), label, 0, 1, 0, 1, GTK_FILL, GTK_FILL, 0, 0);
gtk_widget_show(label);
entry1 = gtk_entry_new();
gtk_table_attach_defaults(GTK_TABLE (table), entry1, 1, 2, 0, 1);
gtk_widget_show(entry1);
label = gtk_label_new("Netmask");
gtk_misc_set_alignment(GTK_MISC (label), 0, 0.5);
gtk_table_attach(GTK_TABLE (table), label, 0, 1, 1, 2, GTK_FILL, GTK_FILL, 0, 0);
gtk_widget_show(label);
entry2 = gtk_entry_new();
gtk_entry_set_max_length(GTK_ENTRY (entry2), IP6_ASCII_ADDR_LEN);
gtk_table_attach_defaults(GTK_TABLE (table), entry2, 1, 2, 1, 2);
gtk_widget_show(entry2);
label = gtk_label_new("DNS Server IP");
gtk_misc_set_alignment(GTK_MISC (label), 0, 0.5);
gtk_table_attach(GTK_TABLE (table), label, 0, 1, 2, 3, GTK_FILL, GTK_FILL, 0, 0);
gtk_widget_show(label);
entry3 = gtk_entry_new();
gtk_entry_set_max_length(GTK_ENTRY (entry3), IP6_ASCII_ADDR_LEN);
gtk_table_attach_defaults(GTK_TABLE (table), entry3, 1, 2, 2, 3);
gtk_widget_show(entry3);
response = gtk_dialog_run(GTK_DIALOG(dialog));
if(response == GTK_RESPONSE_OK) {
gtk_widget_hide(dialog);
snprintf(params, 6, "dhcp:");
strncat(params, gtk_entry_get_text(GTK_ENTRY(entry1)), PARAMS_LEN);
strncat(params, "/", PARAMS_LEN);
strncat(params, gtk_entry_get_text(GTK_ENTRY(entry2)), PARAMS_LEN);
strncat(params, "/", PARAMS_LEN);
strncat(params, gtk_entry_get_text(GTK_ENTRY(entry3)), PARAMS_LEN);
gtkui_start_mitm();
}
gtk_widget_destroy(dialog);
/* a simpler method:
gtkui_input_call("Parameters :", params + strlen("dhcp:"), PARAMS_LEN - strlen("dhcp:"), gtkui_start_mitm);
*/
}
/*
* start the mitm attack by passing the name and parameters
*/
static void gtkui_start_mitm(void)
{
DEBUG_MSG("gtk_start_mitm");
mitm_set(params);
mitm_start();
}
/*
* stop all the mitm attack(s)
*/
void gtkui_mitm_stop(void)
{
GtkWidget *dialog;
DEBUG_MSG("gtk_mitm_stop");
/* create the dialog */
dialog = gtk_message_dialog_new(GTK_WINDOW (window), GTK_DIALOG_MODAL,
GTK_MESSAGE_INFO, 0, "Stopping the mitm attack...");
gtk_window_set_position(GTK_WINDOW (dialog), GTK_WIN_POS_CENTER);
gtk_window_set_resizable(GTK_WINDOW (dialog), FALSE);
gtk_widget_show(dialog);
/* for GTK to display the dialog now */
while (gtk_events_pending ())
gtk_main_iteration ();
/* stop the mitm process */
mitm_stop();
gtk_widget_destroy(dialog);
gtkui_message("MITM attack(s) stopped");
}
/* EOF */
// vim:ts=3:expandtab
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4711_1 |
crossvul-cpp_data_good_4403_1 | /* Generated by Cython 0.29.21 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"name": "clickhouse_driver.bufferedreader",
"sources": [
"/home/klebedev/work/clickhouse-driver/clickhouse_driver/bufferedreader.pyx"
]
},
"module_name": "clickhouse_driver.bufferedreader"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_21"
#define CYTHON_HEX_VERSION 0x001D15F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__clickhouse_driver__bufferedreader
#define __PYX_HAVE_API__clickhouse_driver__bufferedreader
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "pythread.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"clickhouse_driver/bufferedreader.pyx",
"stringsource",
"type.pxd",
"bool.pxd",
"complex.pxd",
};
/*--- Type declarations ---*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader;
/* "clickhouse_driver/bufferedreader.pyx":10
*
*
* cdef class BufferedReader(object): # <<<<<<<<<<<<<<
* cdef public unsigned long long position, current_buffer_size
* cdef public bytearray buffer
*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader {
PyObject_HEAD
unsigned PY_LONG_LONG position;
unsigned PY_LONG_LONG current_buffer_size;
PyObject *buffer;
};
/* "clickhouse_driver/bufferedreader.pyx":180
*
*
* cdef class BufferedSocketReader(BufferedReader): # <<<<<<<<<<<<<<
* cdef object sock
*
*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader __pyx_base;
PyObject *sock;
};
/* "clickhouse_driver/bufferedreader.pyx":194
*
*
* cdef class CompressedBufferedReader(BufferedReader): # <<<<<<<<<<<<<<
* cdef object read_block
*
*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader __pyx_base;
PyObject *read_block;
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* GetItemIntByteArray.proto */
#define __Pyx_GetItemInt_ByteArray(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1))
static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i,
int wraparound, int boundscheck);
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* IncludeStringH.proto */
#include <string.h>
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* CallNextTpTraverse.proto */
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse);
/* CallNextTpClear.proto */
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_dealloc);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_As_unsigned_PY_LONG_LONG(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.version' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.exc' */
/* Module declarations from 'cpython.module' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'cpython.tuple' */
/* Module declarations from 'cpython.list' */
/* Module declarations from 'cpython.sequence' */
/* Module declarations from 'cpython.mapping' */
/* Module declarations from 'cpython.iterator' */
/* Module declarations from 'cpython.number' */
/* Module declarations from 'cpython.int' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.bool' */
static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
/* Module declarations from 'cpython.long' */
/* Module declarations from 'cpython.float' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.complex' */
static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
/* Module declarations from 'cpython.string' */
/* Module declarations from 'cpython.unicode' */
/* Module declarations from 'cpython.dict' */
/* Module declarations from 'cpython.instance' */
/* Module declarations from 'cpython.function' */
/* Module declarations from 'cpython.method' */
/* Module declarations from 'cpython.weakref' */
/* Module declarations from 'cpython.getargs' */
/* Module declarations from 'cpython.pythread' */
/* Module declarations from 'cpython.pystate' */
/* Module declarations from 'cpython.cobject' */
/* Module declarations from 'cpython.oldbuffer' */
/* Module declarations from 'cpython.set' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.bytes' */
/* Module declarations from 'cpython.pycapsule' */
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.bytearray' */
/* Module declarations from 'clickhouse_driver.bufferedreader' */
static PyTypeObject *__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader = 0;
static PyTypeObject *__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader = 0;
static PyTypeObject *__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader = 0;
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *, PyObject *); /*proto*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *, PyObject *); /*proto*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *, PyObject *); /*proto*/
#define __Pyx_MODULE_NAME "clickhouse_driver.bufferedreader"
extern int __pyx_module_is_main_clickhouse_driver__bufferedreader;
int __pyx_module_is_main_clickhouse_driver__bufferedreader = 0;
/* Implementation of 'clickhouse_driver.bufferedreader' */
static PyObject *__pyx_builtin_super;
static PyObject *__pyx_builtin_NotImplementedError;
static PyObject *__pyx_builtin_object;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_UnicodeDecodeError;
static PyObject *__pyx_builtin_EOFError;
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_init[] = "__init__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_sock[] = "sock";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_super[] = "super";
static const char __pyx_k_utf_8[] = "utf-8";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_object[] = "object";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_bufsize[] = "bufsize";
static const char __pyx_k_n_items[] = "n_items";
static const char __pyx_k_EOFError[] = "EOFError";
static const char __pyx_k_encoding[] = "encoding";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_recv_into[] = "recv_into";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_read_block[] = "read_block";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_BufferedReader[] = "BufferedReader";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_read_into_buffer[] = "read_into_buffer";
static const char __pyx_k_UnicodeDecodeError[] = "UnicodeDecodeError";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_NotImplementedError[] = "NotImplementedError";
static const char __pyx_k_BufferedSocketReader[] = "BufferedSocketReader";
static const char __pyx_k_CompressedBufferedReader[] = "CompressedBufferedReader";
static const char __pyx_k_pyx_unpickle_BufferedReader[] = "__pyx_unpickle_BufferedReader";
static const char __pyx_k_pyx_unpickle_BufferedSocketRea[] = "__pyx_unpickle_BufferedSocketReader";
static const char __pyx_k_pyx_unpickle_CompressedBuffere[] = "__pyx_unpickle_CompressedBufferedReader";
static const char __pyx_k_Incompatible_checksums_s_vs_0x18[] = "Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))";
static const char __pyx_k_Incompatible_checksums_s_vs_0x2a[] = "Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))";
static const char __pyx_k_Incompatible_checksums_s_vs_0xef[] = "Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))";
static const char __pyx_k_Unexpected_EOF_while_reading_byt[] = "Unexpected EOF while reading bytes";
static const char __pyx_k_clickhouse_driver_bufferedreader[] = "clickhouse_driver.bufferedreader";
static PyObject *__pyx_n_s_BufferedReader;
static PyObject *__pyx_n_s_BufferedSocketReader;
static PyObject *__pyx_n_s_CompressedBufferedReader;
static PyObject *__pyx_n_s_EOFError;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x18;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x2a;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xef;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_n_s_NotImplementedError;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_kp_s_Unexpected_EOF_while_reading_byt;
static PyObject *__pyx_n_s_UnicodeDecodeError;
static PyObject *__pyx_n_s_bufsize;
static PyObject *__pyx_n_s_clickhouse_driver_bufferedreader;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_encoding;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_init;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_n_items;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_n_s_object;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_BufferedReader;
static PyObject *__pyx_n_s_pyx_unpickle_BufferedSocketRea;
static PyObject *__pyx_n_s_pyx_unpickle_CompressedBuffere;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_read_block;
static PyObject *__pyx_n_s_read_into_buffer;
static PyObject *__pyx_n_s_recv_into;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_sock;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_super;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_kp_s_utf_8;
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_bufsize); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_2read_into_buffer(CYTHON_UNUSED struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_4read(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_unread); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6read_one(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_n_items, PyObject *__pyx_v_encoding); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_4__del__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_10__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_12__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v_sock, PyObject *__pyx_v_bufsize); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v_read_block, PyObject *__pyx_v_bufsize); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_2__pyx_unpickle_BufferedSocketReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_4__pyx_unpickle_CompressedBufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_25411819;
static PyObject *__pyx_int_44607813;
static PyObject *__pyx_int_251251440;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_codeobj__3;
static PyObject *__pyx_codeobj__5;
static PyObject *__pyx_codeobj__7;
/* Late includes */
/* "clickhouse_driver/bufferedreader.pyx":14
* cdef public bytearray buffer
*
* def __init__(self, bufsize): # <<<<<<<<<<<<<<
* self.buffer = bytearray(bufsize)
*
*/
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_bufsize = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bufsize,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bufsize)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 14, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_bufsize = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 14, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader___init__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), __pyx_v_bufsize);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_bufsize) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__init__", 0);
/* "clickhouse_driver/bufferedreader.pyx":15
*
* def __init__(self, bufsize):
* self.buffer = bytearray(bufsize) # <<<<<<<<<<<<<<
*
* self.position = 0
*/
__pyx_t_1 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyByteArray_Type)), __pyx_v_bufsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->buffer);
__Pyx_DECREF(__pyx_v_self->buffer);
__pyx_v_self->buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":17
* self.buffer = bytearray(bufsize)
*
* self.position = 0 # <<<<<<<<<<<<<<
* self.current_buffer_size = 0
*
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":18
*
* self.position = 0
* self.current_buffer_size = 0 # <<<<<<<<<<<<<<
*
* super(BufferedReader, self).__init__()
*/
__pyx_v_self->current_buffer_size = 0;
/* "clickhouse_driver/bufferedreader.pyx":20
* self.current_buffer_size = 0
*
* super(BufferedReader, self).__init__() # <<<<<<<<<<<<<<
*
* def read_into_buffer(self):
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader));
__Pyx_GIVEREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader));
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":14
* cdef public bytearray buffer
*
* def __init__(self, bufsize): # <<<<<<<<<<<<<<
* self.buffer = bytearray(bufsize)
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":22
* super(BufferedReader, self).__init__()
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* raise NotImplementedError
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_2read_into_buffer(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_2read_into_buffer(CYTHON_UNUSED struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("read_into_buffer", 0);
/* "clickhouse_driver/bufferedreader.pyx":23
*
* def read_into_buffer(self):
* raise NotImplementedError # <<<<<<<<<<<<<<
*
* def read(self, unsigned long long unread):
*/
__Pyx_Raise(__pyx_builtin_NotImplementedError, 0, 0, 0);
__PYX_ERR(0, 23, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":22
* super(BufferedReader, self).__init__()
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* raise NotImplementedError
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_into_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":25
* raise NotImplementedError
*
* def read(self, unsigned long long unread): # <<<<<<<<<<<<<<
* # When the buffer is large enough bytes read are almost
* # always hit the buffer.
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_5read(PyObject *__pyx_v_self, PyObject *__pyx_arg_unread); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_5read(PyObject *__pyx_v_self, PyObject *__pyx_arg_unread) {
unsigned PY_LONG_LONG __pyx_v_unread;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read (wrapper)", 0);
assert(__pyx_arg_unread); {
__pyx_v_unread = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_arg_unread); if (unlikely((__pyx_v_unread == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_4read(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((unsigned PY_LONG_LONG)__pyx_v_unread));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_4read(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_unread) {
unsigned PY_LONG_LONG __pyx_v_next_position;
unsigned PY_LONG_LONG __pyx_v_t;
char *__pyx_v_buffer_ptr;
unsigned PY_LONG_LONG __pyx_v_read_bytes;
PyObject *__pyx_v_rv = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
unsigned PY_LONG_LONG __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
unsigned PY_LONG_LONG __pyx_t_6;
unsigned PY_LONG_LONG __pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("read", 0);
/* "clickhouse_driver/bufferedreader.pyx":28
* # When the buffer is large enough bytes read are almost
* # always hit the buffer.
* cdef unsigned long long next_position = unread + self.position # <<<<<<<<<<<<<<
* if next_position < self.current_buffer_size:
* t = self.position
*/
__pyx_v_next_position = (__pyx_v_unread + __pyx_v_self->position);
/* "clickhouse_driver/bufferedreader.pyx":29
* # always hit the buffer.
* cdef unsigned long long next_position = unread + self.position
* if next_position < self.current_buffer_size: # <<<<<<<<<<<<<<
* t = self.position
* self.position = next_position
*/
__pyx_t_1 = ((__pyx_v_next_position < __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_1) {
/* "clickhouse_driver/bufferedreader.pyx":30
* cdef unsigned long long next_position = unread + self.position
* if next_position < self.current_buffer_size:
* t = self.position # <<<<<<<<<<<<<<
* self.position = next_position
* return bytes(self.buffer[t:self.position])
*/
__pyx_t_2 = __pyx_v_self->position;
__pyx_v_t = __pyx_t_2;
/* "clickhouse_driver/bufferedreader.pyx":31
* if next_position < self.current_buffer_size:
* t = self.position
* self.position = next_position # <<<<<<<<<<<<<<
* return bytes(self.buffer[t:self.position])
*
*/
__pyx_v_self->position = __pyx_v_next_position;
/* "clickhouse_driver/bufferedreader.pyx":32
* t = self.position
* self.position = next_position
* return bytes(self.buffer[t:self.position]) # <<<<<<<<<<<<<<
*
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__Pyx_XDECREF(__pyx_r);
if (unlikely(__pyx_v_self->buffer == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 32, __pyx_L1_error)
}
__pyx_t_3 = PySequence_GetSlice(__pyx_v_self->buffer, __pyx_v_t, __pyx_v_self->position); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":29
* # always hit the buffer.
* cdef unsigned long long next_position = unread + self.position
* if next_position < self.current_buffer_size: # <<<<<<<<<<<<<<
* t = self.position
* self.position = next_position
*/
}
/* "clickhouse_driver/bufferedreader.pyx":34
* return bytes(self.buffer[t:self.position])
*
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* cdef unsigned long long read_bytes
* rv = bytes()
*/
__pyx_t_4 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_4);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_4);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":36
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer)
* cdef unsigned long long read_bytes
* rv = bytes() # <<<<<<<<<<<<<<
*
* while unread > 0:
*/
__pyx_t_4 = __Pyx_PyObject_CallNoArg(((PyObject *)(&PyBytes_Type))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_v_rv = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":38
* rv = bytes()
*
* while unread > 0: # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
while (1) {
__pyx_t_1 = ((__pyx_v_unread > 0) != 0);
if (!__pyx_t_1) break;
/* "clickhouse_driver/bufferedreader.pyx":39
*
* while unread > 0:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__pyx_t_1 = ((__pyx_v_self->position == __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_1) {
/* "clickhouse_driver/bufferedreader.pyx":40
* while unread > 0:
* if self.position == self.current_buffer_size:
* self.read_into_buffer() # <<<<<<<<<<<<<<
* buffer_ptr = PyByteArray_AsString(self.buffer)
* self.position = 0
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_4 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":41
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* self.position = 0
*
*/
__pyx_t_4 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_4);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_4);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":42
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer)
* self.position = 0 # <<<<<<<<<<<<<<
*
* read_bytes = min(unread, self.current_buffer_size - self.position)
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":39
*
* while unread > 0:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
}
/* "clickhouse_driver/bufferedreader.pyx":44
* self.position = 0
*
* read_bytes = min(unread, self.current_buffer_size - self.position) # <<<<<<<<<<<<<<
* rv += PyBytes_FromStringAndSize(
* &buffer_ptr[self.position], read_bytes
*/
__pyx_t_2 = (__pyx_v_self->current_buffer_size - __pyx_v_self->position);
__pyx_t_6 = __pyx_v_unread;
if (((__pyx_t_2 < __pyx_t_6) != 0)) {
__pyx_t_7 = __pyx_t_2;
} else {
__pyx_t_7 = __pyx_t_6;
}
__pyx_v_read_bytes = __pyx_t_7;
/* "clickhouse_driver/bufferedreader.pyx":45
*
* read_bytes = min(unread, self.current_buffer_size - self.position)
* rv += PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* &buffer_ptr[self.position], read_bytes
* )
*/
__pyx_t_4 = PyBytes_FromStringAndSize((&(__pyx_v_buffer_ptr[__pyx_v_self->position])), __pyx_v_read_bytes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_rv, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF_SET(__pyx_v_rv, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "clickhouse_driver/bufferedreader.pyx":48
* &buffer_ptr[self.position], read_bytes
* )
* self.position += read_bytes # <<<<<<<<<<<<<<
* unread -= read_bytes
*
*/
__pyx_v_self->position = (__pyx_v_self->position + __pyx_v_read_bytes);
/* "clickhouse_driver/bufferedreader.pyx":49
* )
* self.position += read_bytes
* unread -= read_bytes # <<<<<<<<<<<<<<
*
* return rv
*/
__pyx_v_unread = (__pyx_v_unread - __pyx_v_read_bytes);
}
/* "clickhouse_driver/bufferedreader.pyx":51
* unread -= read_bytes
*
* return rv # <<<<<<<<<<<<<<
*
* def read_one(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_rv);
__pyx_r = __pyx_v_rv;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":25
* raise NotImplementedError
*
* def read(self, unsigned long long unread): # <<<<<<<<<<<<<<
* # When the buffer is large enough bytes read are almost
* # always hit the buffer.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_rv);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":53
* return rv
*
* def read_one(self): # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_7read_one(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_7read_one(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_one (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6read_one(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6read_one(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
unsigned char __pyx_v_rv;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("read_one", 0);
/* "clickhouse_driver/bufferedreader.pyx":54
*
* def read_one(self):
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* self.position = 0
*/
__pyx_t_1 = ((__pyx_v_self->position == __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_1) {
/* "clickhouse_driver/bufferedreader.pyx":55
* def read_one(self):
* if self.position == self.current_buffer_size:
* self.read_into_buffer() # <<<<<<<<<<<<<<
* self.position = 0
*
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "clickhouse_driver/bufferedreader.pyx":56
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
* self.position = 0 # <<<<<<<<<<<<<<
*
* rv = self.buffer[self.position]
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":54
*
* def read_one(self):
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* self.position = 0
*/
}
/* "clickhouse_driver/bufferedreader.pyx":58
* self.position = 0
*
* rv = self.buffer[self.position] # <<<<<<<<<<<<<<
* self.position += 1
* return rv
*/
__pyx_t_5 = __Pyx_GetItemInt_ByteArray(__pyx_v_self->buffer, __pyx_v_self->position, unsigned PY_LONG_LONG, 0, __Pyx_PyInt_From_unsigned_PY_LONG_LONG, 0, 0, 1); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_rv = __pyx_t_5;
/* "clickhouse_driver/bufferedreader.pyx":59
*
* rv = self.buffer[self.position]
* self.position += 1 # <<<<<<<<<<<<<<
* return rv
*
*/
__pyx_v_self->position = (__pyx_v_self->position + 1);
/* "clickhouse_driver/bufferedreader.pyx":60
* rv = self.buffer[self.position]
* self.position += 1
* return rv # <<<<<<<<<<<<<<
*
* def read_strings(self, unsigned long long n_items, encoding=None):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_unsigned_char(__pyx_v_rv); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":53
* return rv
*
* def read_one(self): # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_one", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":62
* return rv
*
* def read_strings(self, unsigned long long n_items, encoding=None): # <<<<<<<<<<<<<<
* """
* Python has great overhead between function calls.
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_9read_strings(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings[] = "\n Python has great overhead between function calls.\n We inline strings reading logic here to avoid this overhead.\n ";
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_9read_strings(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
unsigned PY_LONG_LONG __pyx_v_n_items;
PyObject *__pyx_v_encoding = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_strings (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_n_items,&__pyx_n_s_encoding,0};
PyObject* values[2] = {0,0};
values[1] = ((PyObject *)Py_None);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n_items)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_encoding);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "read_strings") < 0)) __PYX_ERR(0, 62, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_n_items = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v_n_items == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 62, __pyx_L3_error)
__pyx_v_encoding = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("read_strings", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 62, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_strings", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), __pyx_v_n_items, __pyx_v_encoding);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_n_items, PyObject *__pyx_v_encoding) {
PyObject *__pyx_v_items = NULL;
unsigned PY_LONG_LONG __pyx_v_i;
char *__pyx_v_buffer_ptr;
unsigned PY_LONG_LONG __pyx_v_right;
unsigned PY_LONG_LONG __pyx_v_size;
unsigned PY_LONG_LONG __pyx_v_shift;
unsigned PY_LONG_LONG __pyx_v_bytes_read;
unsigned PY_LONG_LONG __pyx_v_b;
char *__pyx_v_c_string;
unsigned PY_LONG_LONG __pyx_v_c_string_size;
char *__pyx_v_c_encoding;
PyObject *__pyx_v_rv = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
char *__pyx_t_5;
unsigned PY_LONG_LONG __pyx_t_6;
unsigned PY_LONG_LONG __pyx_t_7;
unsigned PY_LONG_LONG __pyx_t_8;
unsigned PY_LONG_LONG __pyx_t_9;
unsigned PY_LONG_LONG __pyx_t_10;
unsigned PY_LONG_LONG __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
int __pyx_t_15;
PyObject *__pyx_t_16 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("read_strings", 0);
__Pyx_INCREF(__pyx_v_encoding);
/* "clickhouse_driver/bufferedreader.pyx":67
* We inline strings reading logic here to avoid this overhead.
* """
* items = PyTuple_New(n_items) # <<<<<<<<<<<<<<
*
* cdef unsigned long long i
*/
__pyx_t_1 = PyTuple_New(__pyx_v_n_items); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_items = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":71
* cdef unsigned long long i
* # Buffer vars
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* cdef unsigned long long right
* # String length vars
*/
__pyx_t_1 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_1);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":78
*
* # String for decode vars.
* cdef char *c_string = NULL # <<<<<<<<<<<<<<
* cdef unsigned long long c_string_size = 1024
* cdef char *c_encoding = NULL
*/
__pyx_v_c_string = NULL;
/* "clickhouse_driver/bufferedreader.pyx":79
* # String for decode vars.
* cdef char *c_string = NULL
* cdef unsigned long long c_string_size = 1024 # <<<<<<<<<<<<<<
* cdef char *c_encoding = NULL
* if encoding:
*/
__pyx_v_c_string_size = 0x400;
/* "clickhouse_driver/bufferedreader.pyx":80
* cdef char *c_string = NULL
* cdef unsigned long long c_string_size = 1024
* cdef char *c_encoding = NULL # <<<<<<<<<<<<<<
* if encoding:
* encoding = encoding.encode('utf-8')
*/
__pyx_v_c_encoding = NULL;
/* "clickhouse_driver/bufferedreader.pyx":81
* cdef unsigned long long c_string_size = 1024
* cdef char *c_encoding = NULL
* if encoding: # <<<<<<<<<<<<<<
* encoding = encoding.encode('utf-8')
* c_encoding = encoding
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_encoding); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":82
* cdef char *c_encoding = NULL
* if encoding:
* encoding = encoding.encode('utf-8') # <<<<<<<<<<<<<<
* c_encoding = encoding
*
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_encoding, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_kp_s_utf_8) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_kp_s_utf_8);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_encoding, __pyx_t_1);
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":83
* if encoding:
* encoding = encoding.encode('utf-8')
* c_encoding = encoding # <<<<<<<<<<<<<<
*
* cdef object rv = object()
*/
__pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_encoding); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 83, __pyx_L1_error)
__pyx_v_c_encoding = __pyx_t_5;
/* "clickhouse_driver/bufferedreader.pyx":81
* cdef unsigned long long c_string_size = 1024
* cdef char *c_encoding = NULL
* if encoding: # <<<<<<<<<<<<<<
* encoding = encoding.encode('utf-8')
* c_encoding = encoding
*/
}
/* "clickhouse_driver/bufferedreader.pyx":85
* c_encoding = encoding
*
* cdef object rv = object() # <<<<<<<<<<<<<<
* # String for decode vars.
* if c_encoding:
*/
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_builtin_object); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_rv = __pyx_t_1;
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":87
* cdef object rv = object()
* # String for decode vars.
* if c_encoding: # <<<<<<<<<<<<<<
* c_string = <char *> PyMem_Realloc(NULL, c_string_size)
*
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":88
* # String for decode vars.
* if c_encoding:
* c_string = <char *> PyMem_Realloc(NULL, c_string_size) # <<<<<<<<<<<<<<
*
* for i in range(n_items):
*/
__pyx_v_c_string = ((char *)PyMem_Realloc(NULL, __pyx_v_c_string_size));
/* "clickhouse_driver/bufferedreader.pyx":87
* cdef object rv = object()
* # String for decode vars.
* if c_encoding: # <<<<<<<<<<<<<<
* c_string = <char *> PyMem_Realloc(NULL, c_string_size)
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":90
* c_string = <char *> PyMem_Realloc(NULL, c_string_size)
*
* for i in range(n_items): # <<<<<<<<<<<<<<
* shift = size = 0
*
*/
__pyx_t_6 = __pyx_v_n_items;
__pyx_t_7 = __pyx_t_6;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "clickhouse_driver/bufferedreader.pyx":91
*
* for i in range(n_items):
* shift = size = 0 # <<<<<<<<<<<<<<
*
* # Read string size
*/
__pyx_v_shift = 0;
__pyx_v_size = 0;
/* "clickhouse_driver/bufferedreader.pyx":94
*
* # Read string size
* while True: # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
while (1) {
/* "clickhouse_driver/bufferedreader.pyx":95
* # Read string size
* while True:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
*/
__pyx_t_2 = ((__pyx_v_self->position == __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":96
* while True:
* if self.position == self.current_buffer_size:
* self.read_into_buffer() # <<<<<<<<<<<<<<
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":98
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* self.position = 0
*
*/
__pyx_t_1 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_1);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":99
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer)
* self.position = 0 # <<<<<<<<<<<<<<
*
* b = buffer_ptr[self.position]
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":95
* # Read string size
* while True:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
*/
}
/* "clickhouse_driver/bufferedreader.pyx":101
* self.position = 0
*
* b = buffer_ptr[self.position] # <<<<<<<<<<<<<<
* self.position += 1
*
*/
__pyx_v_b = (__pyx_v_buffer_ptr[__pyx_v_self->position]);
/* "clickhouse_driver/bufferedreader.pyx":102
*
* b = buffer_ptr[self.position]
* self.position += 1 # <<<<<<<<<<<<<<
*
* size |= (b & 0x7f) << shift
*/
__pyx_v_self->position = (__pyx_v_self->position + 1);
/* "clickhouse_driver/bufferedreader.pyx":104
* self.position += 1
*
* size |= (b & 0x7f) << shift # <<<<<<<<<<<<<<
* if b < 0x80:
* break
*/
__pyx_v_size = (__pyx_v_size | ((__pyx_v_b & 0x7f) << __pyx_v_shift));
/* "clickhouse_driver/bufferedreader.pyx":105
*
* size |= (b & 0x7f) << shift
* if b < 0x80: # <<<<<<<<<<<<<<
* break
*
*/
__pyx_t_2 = ((__pyx_v_b < 0x80) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":106
* size |= (b & 0x7f) << shift
* if b < 0x80:
* break # <<<<<<<<<<<<<<
*
* shift += 7
*/
goto __pyx_L8_break;
/* "clickhouse_driver/bufferedreader.pyx":105
*
* size |= (b & 0x7f) << shift
* if b < 0x80: # <<<<<<<<<<<<<<
* break
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":108
* break
*
* shift += 7 # <<<<<<<<<<<<<<
*
* right = self.position + size
*/
__pyx_v_shift = (__pyx_v_shift + 7);
}
__pyx_L8_break:;
/* "clickhouse_driver/bufferedreader.pyx":110
* shift += 7
*
* right = self.position + size # <<<<<<<<<<<<<<
*
* if c_encoding:
*/
__pyx_v_right = (__pyx_v_self->position + __pyx_v_size);
/* "clickhouse_driver/bufferedreader.pyx":112
* right = self.position + size
*
* if c_encoding: # <<<<<<<<<<<<<<
* if size + 1 > c_string_size:
* c_string_size = size + 1
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":113
*
* if c_encoding:
* if size + 1 > c_string_size: # <<<<<<<<<<<<<<
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
*/
__pyx_t_2 = (((__pyx_v_size + 1) > __pyx_v_c_string_size) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":114
* if c_encoding:
* if size + 1 > c_string_size:
* c_string_size = size + 1 # <<<<<<<<<<<<<<
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL:
*/
__pyx_v_c_string_size = (__pyx_v_size + 1);
/* "clickhouse_driver/bufferedreader.pyx":115
* if size + 1 > c_string_size:
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size) # <<<<<<<<<<<<<<
* if c_string is NULL:
* raise MemoryError()
*/
__pyx_v_c_string = ((char *)PyMem_Realloc(__pyx_v_c_string, __pyx_v_c_string_size));
/* "clickhouse_driver/bufferedreader.pyx":116
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL: # <<<<<<<<<<<<<<
* raise MemoryError()
* c_string[size] = 0
*/
__pyx_t_2 = ((__pyx_v_c_string == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "clickhouse_driver/bufferedreader.pyx":117
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL:
* raise MemoryError() # <<<<<<<<<<<<<<
* c_string[size] = 0
* bytes_read = 0
*/
PyErr_NoMemory(); __PYX_ERR(0, 117, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":116
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL: # <<<<<<<<<<<<<<
* raise MemoryError()
* c_string[size] = 0
*/
}
/* "clickhouse_driver/bufferedreader.pyx":113
*
* if c_encoding:
* if size + 1 > c_string_size: # <<<<<<<<<<<<<<
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
*/
}
/* "clickhouse_driver/bufferedreader.pyx":118
* if c_string is NULL:
* raise MemoryError()
* c_string[size] = 0 # <<<<<<<<<<<<<<
* bytes_read = 0
*
*/
(__pyx_v_c_string[__pyx_v_size]) = 0;
/* "clickhouse_driver/bufferedreader.pyx":119
* raise MemoryError()
* c_string[size] = 0
* bytes_read = 0 # <<<<<<<<<<<<<<
*
* # Decoding pure c strings in Cython is faster than in pure Python.
*/
__pyx_v_bytes_read = 0;
/* "clickhouse_driver/bufferedreader.pyx":112
* right = self.position + size
*
* if c_encoding: # <<<<<<<<<<<<<<
* if size + 1 > c_string_size:
* c_string_size = size + 1
*/
}
/* "clickhouse_driver/bufferedreader.pyx":124
* # We need to copy it into buffer for adding null symbol at the end.
* # In ClickHouse block there is no null
* if right > self.current_buffer_size: # <<<<<<<<<<<<<<
* if c_encoding:
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
*/
__pyx_t_2 = ((__pyx_v_right > __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":125
* # In ClickHouse block there is no null
* if right > self.current_buffer_size:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
* self.current_buffer_size - self.position)
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":126
* if right > self.current_buffer_size:
* if c_encoding:
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position], # <<<<<<<<<<<<<<
* self.current_buffer_size - self.position)
* else:
*/
(void)(memcpy((&(__pyx_v_c_string[__pyx_v_bytes_read])), (&(__pyx_v_buffer_ptr[__pyx_v_self->position])), (__pyx_v_self->current_buffer_size - __pyx_v_self->position)));
/* "clickhouse_driver/bufferedreader.pyx":125
* # In ClickHouse block there is no null
* if right > self.current_buffer_size:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
* self.current_buffer_size - self.position)
*/
goto __pyx_L15;
}
/* "clickhouse_driver/bufferedreader.pyx":129
* self.current_buffer_size - self.position)
* else:
* rv = PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* &buffer_ptr[self.position],
* self.current_buffer_size - self.position
*/
/*else*/ {
/* "clickhouse_driver/bufferedreader.pyx":131
* rv = PyBytes_FromStringAndSize(
* &buffer_ptr[self.position],
* self.current_buffer_size - self.position # <<<<<<<<<<<<<<
* )
*
*/
__pyx_t_1 = PyBytes_FromStringAndSize((&(__pyx_v_buffer_ptr[__pyx_v_self->position])), (__pyx_v_self->current_buffer_size - __pyx_v_self->position)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_1);
__pyx_t_1 = 0;
}
__pyx_L15:;
/* "clickhouse_driver/bufferedreader.pyx":134
* )
*
* bytes_read = self.current_buffer_size - self.position # <<<<<<<<<<<<<<
* # Read the rest of the string.
* while bytes_read != size:
*/
__pyx_v_bytes_read = (__pyx_v_self->current_buffer_size - __pyx_v_self->position);
/* "clickhouse_driver/bufferedreader.pyx":136
* bytes_read = self.current_buffer_size - self.position
* # Read the rest of the string.
* while bytes_read != size: # <<<<<<<<<<<<<<
* self.position = size - bytes_read
*
*/
while (1) {
__pyx_t_2 = ((__pyx_v_bytes_read != __pyx_v_size) != 0);
if (!__pyx_t_2) break;
/* "clickhouse_driver/bufferedreader.pyx":137
* # Read the rest of the string.
* while bytes_read != size:
* self.position = size - bytes_read # <<<<<<<<<<<<<<
*
* self.read_into_buffer()
*/
__pyx_v_self->position = (__pyx_v_size - __pyx_v_bytes_read);
/* "clickhouse_driver/bufferedreader.pyx":139
* self.position = size - bytes_read
*
* self.read_into_buffer() # <<<<<<<<<<<<<<
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":141
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* # There can be not enough data in buffer.
* self.position = min(
*/
__pyx_t_1 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_1);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":144
* # There can be not enough data in buffer.
* self.position = min(
* self.position, self.current_buffer_size # <<<<<<<<<<<<<<
* )
* if c_encoding:
*/
__pyx_t_9 = __pyx_v_self->current_buffer_size;
__pyx_t_10 = __pyx_v_self->position;
if (((__pyx_t_9 < __pyx_t_10) != 0)) {
__pyx_t_11 = __pyx_t_9;
} else {
__pyx_t_11 = __pyx_t_10;
}
/* "clickhouse_driver/bufferedreader.pyx":143
* buffer_ptr = PyByteArray_AsString(self.buffer)
* # There can be not enough data in buffer.
* self.position = min( # <<<<<<<<<<<<<<
* self.position, self.current_buffer_size
* )
*/
__pyx_v_self->position = __pyx_t_11;
/* "clickhouse_driver/bufferedreader.pyx":146
* self.position, self.current_buffer_size
* )
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(
* &c_string[bytes_read], buffer_ptr, self.position
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":147
* )
* if c_encoding:
* memcpy( # <<<<<<<<<<<<<<
* &c_string[bytes_read], buffer_ptr, self.position
* )
*/
(void)(memcpy((&(__pyx_v_c_string[__pyx_v_bytes_read])), __pyx_v_buffer_ptr, __pyx_v_self->position));
/* "clickhouse_driver/bufferedreader.pyx":146
* self.position, self.current_buffer_size
* )
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(
* &c_string[bytes_read], buffer_ptr, self.position
*/
goto __pyx_L18;
}
/* "clickhouse_driver/bufferedreader.pyx":151
* )
* else:
* rv += PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* buffer_ptr, self.position
* )
*/
/*else*/ {
/* "clickhouse_driver/bufferedreader.pyx":152
* else:
* rv += PyBytes_FromStringAndSize(
* buffer_ptr, self.position # <<<<<<<<<<<<<<
* )
* bytes_read += self.position
*/
__pyx_t_1 = PyBytes_FromStringAndSize(__pyx_v_buffer_ptr, __pyx_v_self->position); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* "clickhouse_driver/bufferedreader.pyx":151
* )
* else:
* rv += PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* buffer_ptr, self.position
* )
*/
__pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_rv, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_3);
__pyx_t_3 = 0;
}
__pyx_L18:;
/* "clickhouse_driver/bufferedreader.pyx":154
* buffer_ptr, self.position
* )
* bytes_read += self.position # <<<<<<<<<<<<<<
*
* else:
*/
__pyx_v_bytes_read = (__pyx_v_bytes_read + __pyx_v_self->position);
}
/* "clickhouse_driver/bufferedreader.pyx":124
* # We need to copy it into buffer for adding null symbol at the end.
* # In ClickHouse block there is no null
* if right > self.current_buffer_size: # <<<<<<<<<<<<<<
* if c_encoding:
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
*/
goto __pyx_L14;
}
/* "clickhouse_driver/bufferedreader.pyx":157
*
* else:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(c_string, &buffer_ptr[self.position], size)
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":158
* else:
* if c_encoding:
* memcpy(c_string, &buffer_ptr[self.position], size) # <<<<<<<<<<<<<<
* else:
* rv = PyBytes_FromStringAndSize(
*/
(void)(memcpy(__pyx_v_c_string, (&(__pyx_v_buffer_ptr[__pyx_v_self->position])), __pyx_v_size));
/* "clickhouse_driver/bufferedreader.pyx":157
*
* else:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(c_string, &buffer_ptr[self.position], size)
* else:
*/
goto __pyx_L19;
}
/* "clickhouse_driver/bufferedreader.pyx":160
* memcpy(c_string, &buffer_ptr[self.position], size)
* else:
* rv = PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* &buffer_ptr[self.position], size
* )
*/
/*else*/ {
/* "clickhouse_driver/bufferedreader.pyx":161
* else:
* rv = PyBytes_FromStringAndSize(
* &buffer_ptr[self.position], size # <<<<<<<<<<<<<<
* )
* self.position = right
*/
__pyx_t_3 = PyBytes_FromStringAndSize((&(__pyx_v_buffer_ptr[__pyx_v_self->position])), __pyx_v_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_3);
__pyx_t_3 = 0;
}
__pyx_L19:;
/* "clickhouse_driver/bufferedreader.pyx":163
* &buffer_ptr[self.position], size
* )
* self.position = right # <<<<<<<<<<<<<<
*
* if c_encoding:
*/
__pyx_v_self->position = __pyx_v_right;
}
__pyx_L14:;
/* "clickhouse_driver/bufferedreader.pyx":165
* self.position = right
*
* if c_encoding: # <<<<<<<<<<<<<<
* try:
* rv = c_string[:size].decode(c_encoding)
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":166
*
* if c_encoding:
* try: # <<<<<<<<<<<<<<
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
__Pyx_XGOTREF(__pyx_t_12);
__Pyx_XGOTREF(__pyx_t_13);
__Pyx_XGOTREF(__pyx_t_14);
/*try:*/ {
/* "clickhouse_driver/bufferedreader.pyx":167
* if c_encoding:
* try:
* rv = c_string[:size].decode(c_encoding) # <<<<<<<<<<<<<<
* except UnicodeDecodeError:
* rv = PyBytes_FromStringAndSize(c_string, size)
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_c_string, 0, __pyx_v_size, __pyx_v_c_encoding, NULL, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L21_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_3);
__pyx_t_3 = 0;
/* "clickhouse_driver/bufferedreader.pyx":166
*
* if c_encoding:
* try: # <<<<<<<<<<<<<<
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
*/
}
__Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
goto __pyx_L28_try_end;
__pyx_L21_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":168
* try:
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError: # <<<<<<<<<<<<<<
* rv = PyBytes_FromStringAndSize(c_string, size)
*
*/
__pyx_t_15 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_UnicodeDecodeError);
if (__pyx_t_15) {
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_strings", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_4) < 0) __PYX_ERR(0, 168, __pyx_L23_except_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_4);
/* "clickhouse_driver/bufferedreader.pyx":169
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
* rv = PyBytes_FromStringAndSize(c_string, size) # <<<<<<<<<<<<<<
*
* Py_INCREF(rv)
*/
__pyx_t_16 = PyBytes_FromStringAndSize(__pyx_v_c_string, __pyx_v_size); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 169, __pyx_L23_except_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_16);
__pyx_t_16 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L22_exception_handled;
}
goto __pyx_L23_except_error;
__pyx_L23_except_error:;
/* "clickhouse_driver/bufferedreader.pyx":166
*
* if c_encoding:
* try: # <<<<<<<<<<<<<<
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
*/
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_XGIVEREF(__pyx_t_13);
__Pyx_XGIVEREF(__pyx_t_14);
__Pyx_ExceptionReset(__pyx_t_12, __pyx_t_13, __pyx_t_14);
goto __pyx_L1_error;
__pyx_L22_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_XGIVEREF(__pyx_t_13);
__Pyx_XGIVEREF(__pyx_t_14);
__Pyx_ExceptionReset(__pyx_t_12, __pyx_t_13, __pyx_t_14);
__pyx_L28_try_end:;
}
/* "clickhouse_driver/bufferedreader.pyx":165
* self.position = right
*
* if c_encoding: # <<<<<<<<<<<<<<
* try:
* rv = c_string[:size].decode(c_encoding)
*/
}
/* "clickhouse_driver/bufferedreader.pyx":171
* rv = PyBytes_FromStringAndSize(c_string, size)
*
* Py_INCREF(rv) # <<<<<<<<<<<<<<
* PyTuple_SET_ITEM(items, i, rv)
*
*/
Py_INCREF(__pyx_v_rv);
/* "clickhouse_driver/bufferedreader.pyx":172
*
* Py_INCREF(rv)
* PyTuple_SET_ITEM(items, i, rv) # <<<<<<<<<<<<<<
*
* if c_string:
*/
PyTuple_SET_ITEM(__pyx_v_items, __pyx_v_i, __pyx_v_rv);
}
/* "clickhouse_driver/bufferedreader.pyx":174
* PyTuple_SET_ITEM(items, i, rv)
*
* if c_string: # <<<<<<<<<<<<<<
* PyMem_Free(c_string)
*
*/
__pyx_t_2 = (__pyx_v_c_string != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":175
*
* if c_string:
* PyMem_Free(c_string) # <<<<<<<<<<<<<<
*
* return items
*/
PyMem_Free(__pyx_v_c_string);
/* "clickhouse_driver/bufferedreader.pyx":174
* PyTuple_SET_ITEM(items, i, rv)
*
* if c_string: # <<<<<<<<<<<<<<
* PyMem_Free(c_string)
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":177
* PyMem_Free(c_string)
*
* return items # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_items);
__pyx_r = __pyx_v_items;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":62
* return rv
*
* def read_strings(self, unsigned long long n_items, encoding=None): # <<<<<<<<<<<<<<
* """
* Python has great overhead between function calls.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_16);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_strings", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_items);
__Pyx_XDECREF(__pyx_v_rv);
__Pyx_XDECREF(__pyx_v_encoding);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":11
*
* cdef class BufferedReader(object):
* cdef public unsigned long long position, current_buffer_size # <<<<<<<<<<<<<<
* cdef public bytearray buffer
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->position); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.position.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_2__set__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
unsigned PY_LONG_LONG __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_v_value); if (unlikely((__pyx_t_1 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L1_error)
__pyx_v_self->position = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.position.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size___get__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.current_buffer_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_2__set__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
unsigned PY_LONG_LONG __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_v_value); if (unlikely((__pyx_t_1 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L1_error)
__pyx_v_self->current_buffer_size = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.current_buffer_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":12
* cdef class BufferedReader(object):
* cdef public unsigned long long position, current_buffer_size
* cdef public bytearray buffer # <<<<<<<<<<<<<<
*
* def __init__(self, bufsize):
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer___get__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->buffer);
__pyx_r = __pyx_v_self->buffer;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_2__set__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(PyByteArray_CheckExact(__pyx_v_value))||((__pyx_v_value) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_v_value)->tp_name), 0))) __PYX_ERR(0, 12, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->buffer);
__Pyx_DECREF(__pyx_v_self->buffer);
__pyx_v_self->buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.buffer.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_5__del__(PyObject *__pyx_v_self); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_4__del__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_4__del__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->buffer);
__Pyx_DECREF(__pyx_v_self->buffer);
__pyx_v_self->buffer = ((PyObject*)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_10__reduce_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_10__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->position); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_self->buffer);
__Pyx_GIVEREF(__pyx_v_self->buffer);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->buffer);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_3 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v__dict = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_4 = (__pyx_v__dict != Py_None);
__pyx_t_5 = (__pyx_t_4 != 0);
if (__pyx_t_5) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v__dict);
__pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.buffer is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.buffer is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
*/
/*else*/ {
__pyx_t_5 = (__pyx_v_self->buffer != ((PyObject*)Py_None));
__pyx_v_use_setstate = __pyx_t_5;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
* else:
*/
__pyx_t_5 = (__pyx_v_use_setstate != 0);
if (__pyx_t_5) {
/* "(tree fragment)":13
* use_setstate = self.buffer is not None
* if use_setstate:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_pyx_unpickle_BufferedReader); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_44607813);
__Pyx_GIVEREF(__pyx_int_44607813);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_44607813);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_BufferedReader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_44607813);
__Pyx_GIVEREF(__pyx_int_44607813);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_44607813);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_12__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_12__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":183
* cdef object sock
*
* def __init__(self, sock, bufsize): # <<<<<<<<<<<<<<
* self.sock = sock
* super(BufferedSocketReader, self).__init__(bufsize)
*/
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_sock = 0;
PyObject *__pyx_v_bufsize = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sock,&__pyx_n_s_bufsize,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sock)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bufsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 183, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 183, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_sock = values[0];
__pyx_v_bufsize = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 183, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader___init__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self), __pyx_v_sock, __pyx_v_bufsize);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v_sock, PyObject *__pyx_v_bufsize) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__init__", 0);
/* "clickhouse_driver/bufferedreader.pyx":184
*
* def __init__(self, sock, bufsize):
* self.sock = sock # <<<<<<<<<<<<<<
* super(BufferedSocketReader, self).__init__(bufsize)
*
*/
__Pyx_INCREF(__pyx_v_sock);
__Pyx_GIVEREF(__pyx_v_sock);
__Pyx_GOTREF(__pyx_v_self->sock);
__Pyx_DECREF(__pyx_v_self->sock);
__pyx_v_self->sock = __pyx_v_sock;
/* "clickhouse_driver/bufferedreader.pyx":185
* def __init__(self, sock, bufsize):
* self.sock = sock
* super(BufferedSocketReader, self).__init__(bufsize) # <<<<<<<<<<<<<<
*
* def read_into_buffer(self):
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader));
__Pyx_GIVEREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader));
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_bufsize) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_bufsize);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":183
* cdef object sock
*
* def __init__(self, sock, bufsize): # <<<<<<<<<<<<<<
* self.sock = sock
* super(BufferedSocketReader, self).__init__(bufsize)
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":187
* super(BufferedSocketReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_2read_into_buffer(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
unsigned PY_LONG_LONG __pyx_t_4;
int __pyx_t_5;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("read_into_buffer", 0);
/* "clickhouse_driver/bufferedreader.pyx":188
*
* def read_into_buffer(self):
* self.current_buffer_size = self.sock.recv_into(self.buffer) # <<<<<<<<<<<<<<
*
* if self.current_buffer_size == 0:
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->sock, __pyx_n_s_recv_into); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_self->__pyx_base.buffer) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_self->__pyx_base.buffer);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_4 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_self->__pyx_base.current_buffer_size = __pyx_t_4;
/* "clickhouse_driver/bufferedreader.pyx":190
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*
*/
__pyx_t_5 = ((__pyx_v_self->__pyx_base.current_buffer_size == 0) != 0);
if (unlikely(__pyx_t_5)) {
/* "clickhouse_driver/bufferedreader.pyx":191
*
* if self.current_buffer_size == 0:
* raise EOFError('Unexpected EOF while reading bytes') # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_EOFError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 191, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(0, 191, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":190
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":187
* super(BufferedSocketReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.read_into_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_4__reduce_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.sock) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->__pyx_base.current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->__pyx_base.position); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_v_self->__pyx_base.buffer);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_INCREF(__pyx_v_self->sock);
__Pyx_GIVEREF(__pyx_v_self->sock);
PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_self->sock);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.sock)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_3 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v__dict = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.sock)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_4 = (__pyx_v__dict != Py_None);
__pyx_t_5 = (__pyx_t_4 != 0);
if (__pyx_t_5) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v__dict);
__pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.buffer is not None or self.sock is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.sock)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.buffer is not None or self.sock is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
*/
/*else*/ {
__pyx_t_4 = (__pyx_v_self->__pyx_base.buffer != ((PyObject*)Py_None));
__pyx_t_6 = (__pyx_t_4 != 0);
if (!__pyx_t_6) {
} else {
__pyx_t_5 = __pyx_t_6;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_6 = (__pyx_v_self->sock != Py_None);
__pyx_t_4 = (__pyx_t_6 != 0);
__pyx_t_5 = __pyx_t_4;
__pyx_L4_bool_binop_done:;
__pyx_v_use_setstate = __pyx_t_5;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.sock is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
* else:
*/
__pyx_t_5 = (__pyx_v_use_setstate != 0);
if (__pyx_t_5) {
/* "(tree fragment)":13
* use_setstate = self.buffer is not None or self.sock is not None
* if use_setstate:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_pyx_unpickle_BufferedSocketRea); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_251251440);
__Pyx_GIVEREF(__pyx_int_251251440);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_251251440);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.sock is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_BufferedSocketRea); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_251251440);
__Pyx_GIVEREF(__pyx_int_251251440);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_251251440);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":197
* cdef object read_block
*
* def __init__(self, read_block, bufsize): # <<<<<<<<<<<<<<
* self.read_block = read_block
* super(CompressedBufferedReader, self).__init__(bufsize)
*/
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_read_block = 0;
PyObject *__pyx_v_bufsize = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_read_block,&__pyx_n_s_bufsize,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_read_block)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bufsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 197, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 197, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_read_block = values[0];
__pyx_v_bufsize = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 197, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader___init__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self), __pyx_v_read_block, __pyx_v_bufsize);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v_read_block, PyObject *__pyx_v_bufsize) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__init__", 0);
/* "clickhouse_driver/bufferedreader.pyx":198
*
* def __init__(self, read_block, bufsize):
* self.read_block = read_block # <<<<<<<<<<<<<<
* super(CompressedBufferedReader, self).__init__(bufsize)
*
*/
__Pyx_INCREF(__pyx_v_read_block);
__Pyx_GIVEREF(__pyx_v_read_block);
__Pyx_GOTREF(__pyx_v_self->read_block);
__Pyx_DECREF(__pyx_v_self->read_block);
__pyx_v_self->read_block = __pyx_v_read_block;
/* "clickhouse_driver/bufferedreader.pyx":199
* def __init__(self, read_block, bufsize):
* self.read_block = read_block
* super(CompressedBufferedReader, self).__init__(bufsize) # <<<<<<<<<<<<<<
*
* def read_into_buffer(self):
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader));
__Pyx_GIVEREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader));
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_bufsize) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_bufsize);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":197
* cdef object read_block
*
* def __init__(self, read_block, bufsize): # <<<<<<<<<<<<<<
* self.read_block = read_block
* super(CompressedBufferedReader, self).__init__(bufsize)
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":201
* super(CompressedBufferedReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.buffer = bytearray(self.read_block())
* self.current_buffer_size = len(self.buffer)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_2read_into_buffer(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("read_into_buffer", 0);
/* "clickhouse_driver/bufferedreader.pyx":202
*
* def read_into_buffer(self):
* self.buffer = bytearray(self.read_block()) # <<<<<<<<<<<<<<
* self.current_buffer_size = len(self.buffer)
*
*/
__Pyx_INCREF(__pyx_v_self->read_block);
__pyx_t_2 = __pyx_v_self->read_block; __pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyByteArray_Type)), __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_self->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v_self->__pyx_base.buffer);
__pyx_v_self->__pyx_base.buffer = ((PyObject*)__pyx_t_2);
__pyx_t_2 = 0;
/* "clickhouse_driver/bufferedreader.pyx":203
* def read_into_buffer(self):
* self.buffer = bytearray(self.read_block())
* self.current_buffer_size = len(self.buffer) # <<<<<<<<<<<<<<
*
* if self.current_buffer_size == 0:
*/
__pyx_t_2 = __pyx_v_self->__pyx_base.buffer;
__Pyx_INCREF(__pyx_t_2);
if (unlikely(__pyx_t_2 == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 203, __pyx_L1_error)
}
__pyx_t_4 = PyByteArray_GET_SIZE(__pyx_t_2); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 203, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_self->__pyx_base.current_buffer_size = __pyx_t_4;
/* "clickhouse_driver/bufferedreader.pyx":205
* self.current_buffer_size = len(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*/
__pyx_t_5 = ((__pyx_v_self->__pyx_base.current_buffer_size == 0) != 0);
if (unlikely(__pyx_t_5)) {
/* "clickhouse_driver/bufferedreader.pyx":206
*
* if self.current_buffer_size == 0:
* raise EOFError('Unexpected EOF while reading bytes') # <<<<<<<<<<<<<<
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_EOFError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 206, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":205
* self.current_buffer_size = len(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*/
}
/* "clickhouse_driver/bufferedreader.pyx":201
* super(CompressedBufferedReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.buffer = bytearray(self.read_block())
* self.current_buffer_size = len(self.buffer)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.read_into_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_4__reduce_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->__pyx_base.current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->__pyx_base.position); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_v_self->__pyx_base.buffer);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_INCREF(__pyx_v_self->read_block);
__Pyx_GIVEREF(__pyx_v_self->read_block);
PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_self->read_block);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_3 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v__dict = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_4 = (__pyx_v__dict != Py_None);
__pyx_t_5 = (__pyx_t_4 != 0);
if (__pyx_t_5) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v__dict);
__pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.buffer is not None or self.read_block is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.buffer is not None or self.read_block is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
*/
/*else*/ {
__pyx_t_4 = (__pyx_v_self->__pyx_base.buffer != ((PyObject*)Py_None));
__pyx_t_6 = (__pyx_t_4 != 0);
if (!__pyx_t_6) {
} else {
__pyx_t_5 = __pyx_t_6;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_6 = (__pyx_v_self->read_block != Py_None);
__pyx_t_4 = (__pyx_t_6 != 0);
__pyx_t_5 = __pyx_t_4;
__pyx_L4_bool_binop_done:;
__pyx_v_use_setstate = __pyx_t_5;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.read_block is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
* else:
*/
__pyx_t_5 = (__pyx_v_use_setstate != 0);
if (__pyx_t_5) {
/* "(tree fragment)":13
* use_setstate = self.buffer is not None or self.read_block is not None
* if use_setstate:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_pyx_unpickle_CompressedBuffere); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_25411819);
__Pyx_GIVEREF(__pyx_int_25411819);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_25411819);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.read_block is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_CompressedBuffere); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_25411819);
__Pyx_GIVEREF(__pyx_int_25411819);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_25411819);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_6__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader = {"__pyx_unpickle_BufferedReader", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedReader (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedReader", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedReader", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_BufferedReader") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedReader", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedReader", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x2a8a945: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0x2a8a945) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0x2a8a945:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, -1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0x2a8a945:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x2a, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x2a8a945: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
unsigned PY_LONG_LONG __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[3])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->buffer);
__Pyx_DECREF(__pyx_v___pyx_result->buffer);
__pyx_v___pyx_result->buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_2 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_2 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->position = __pyx_t_2;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[3])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_4 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = ((__pyx_t_4 > 3) != 0);
if (__pyx_t_5) {
} else {
__pyx_t_3 = __pyx_t_5;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_5 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_6 = (__pyx_t_5 != 0);
__pyx_t_3 = __pyx_t_6;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[3]) # <<<<<<<<<<<<<<
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_update); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
}
}
__pyx_t_1 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[3])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedSocketReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader = {"__pyx_unpickle_BufferedSocketReader", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedSocketReader", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedSocketReader", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_BufferedSocketReader") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedSocketReader", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_2__pyx_unpickle_BufferedSocketReader(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_2__pyx_unpickle_BufferedSocketReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xef9caf0: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xef9caf0) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xef9caf0:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, -1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xef9caf0:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xef, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xef9caf0: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedSocketReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
unsigned PY_LONG_LONG __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v___pyx_result->__pyx_base.buffer);
__pyx_v___pyx_result->__pyx_base.buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_2 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_2 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.position = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->sock);
__Pyx_DECREF(__pyx_v___pyx_result->sock);
__pyx_v___pyx_result->sock = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_4 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = ((__pyx_t_4 > 4) != 0);
if (__pyx_t_5) {
} else {
__pyx_t_3 = __pyx_t_5;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_5 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_6 = (__pyx_t_5 != 0);
__pyx_t_3 = __pyx_t_6;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_update); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
}
}
__pyx_t_1 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_CompressedBufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader = {"__pyx_unpickle_CompressedBufferedReader", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_CompressedBufferedReader (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressedBufferedReader", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressedBufferedReader", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_CompressedBufferedReader") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressedBufferedReader", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_CompressedBufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_4__pyx_unpickle_CompressedBufferedReader(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_4__pyx_unpickle_CompressedBufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_CompressedBufferedReader", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x183c0eb: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0x183c0eb) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0x183c0eb:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, -1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0x183c0eb:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x18, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x183c0eb: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_CompressedBufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_CompressedBufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
unsigned PY_LONG_LONG __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_CompressedBufferedReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v___pyx_result->__pyx_base.buffer);
__pyx_v___pyx_result->__pyx_base.buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_2 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_2 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.position = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->read_block);
__Pyx_DECREF(__pyx_v___pyx_result->read_block);
__pyx_v___pyx_result->read_block = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_4 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = ((__pyx_t_4 > 4) != 0);
if (__pyx_t_5) {
} else {
__pyx_t_3 = __pyx_t_5;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_5 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_6 = (__pyx_t_5 != 0);
__pyx_t_3 = __pyx_t_6;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_update); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
}
}
__pyx_t_1 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_CompressedBufferedReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)o);
p->buffer = ((PyObject*)Py_None); Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader(PyObject *o) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
Py_CLEAR(p->buffer);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_1__get__(o);
}
static int __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
if (v) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_3__set__(o, v);
}
else {
PyErr_SetString(PyExc_NotImplementedError, "__del__");
return -1;
}
}
static PyObject *__pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_1__get__(o);
}
static int __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
if (v) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_3__set__(o, v);
}
else {
PyErr_SetString(PyExc_NotImplementedError, "__del__");
return -1;
}
}
static PyObject *__pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_1__get__(o);
}
static int __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
if (v) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_3__set__(o, v);
}
else {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_5__del__(o);
}
}
static PyMethodDef __pyx_methods_17clickhouse_driver_14bufferedreader_BufferedReader[] = {
{"read_into_buffer", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_3read_into_buffer, METH_NOARGS, 0},
{"read", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_5read, METH_O, 0},
{"read_one", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_7read_one, METH_NOARGS, 0},
{"read_strings", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_9read_strings, METH_VARARGS|METH_KEYWORDS, __pyx_doc_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings},
{"__reduce_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_11__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_13__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_17clickhouse_driver_14bufferedreader_BufferedReader[] = {
{(char *)"position", __pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position, __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position, (char *)0, 0},
{(char *)"current_buffer_size", __pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size, __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size, (char *)0, 0},
{(char *)"buffer", __pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer, __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader = {
PyVarObject_HEAD_INIT(0, 0)
"clickhouse_driver.bufferedreader.BufferedReader", /*tp_name*/
sizeof(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p;
PyObject *o = __pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o);
p->sock = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyObject *o) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->sock);
#if CYTHON_USE_TYPE_SLOTS
if (PyType_IS_GC(Py_TYPE(o)->tp_base))
#endif
PyObject_GC_Track(o);
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader(o);
}
static int __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o;
e = ((likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) ? ((__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse) ? __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_BufferedSocketReader)); if (e) return e;
if (p->sock) {
e = (*v)(p->sock, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyObject *o) {
PyObject* tmp;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o;
if (likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) { if (__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear) __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear(o); } else __Pyx_call_next_tp_clear(o, __pyx_tp_clear_17clickhouse_driver_14bufferedreader_BufferedSocketReader);
tmp = ((PyObject*)p->sock);
p->sock = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_17clickhouse_driver_14bufferedreader_BufferedSocketReader[] = {
{"read_into_buffer", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_3read_into_buffer, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_5__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader = {
PyVarObject_HEAD_INIT(0, 0)
"clickhouse_driver.bufferedreader.BufferedSocketReader", /*tp_name*/
sizeof(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_traverse*/
__pyx_tp_clear_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p;
PyObject *o = __pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o);
p->read_block = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyObject *o) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->read_block);
#if CYTHON_USE_TYPE_SLOTS
if (PyType_IS_GC(Py_TYPE(o)->tp_base))
#endif
PyObject_GC_Track(o);
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader(o);
}
static int __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o;
e = ((likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) ? ((__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse) ? __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_CompressedBufferedReader)); if (e) return e;
if (p->read_block) {
e = (*v)(p->read_block, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyObject *o) {
PyObject* tmp;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o;
if (likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) { if (__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear) __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear(o); } else __Pyx_call_next_tp_clear(o, __pyx_tp_clear_17clickhouse_driver_14bufferedreader_CompressedBufferedReader);
tmp = ((PyObject*)p->read_block);
p->read_block = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_17clickhouse_driver_14bufferedreader_CompressedBufferedReader[] = {
{"read_into_buffer", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_3read_into_buffer, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_5__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_7__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader = {
PyVarObject_HEAD_INIT(0, 0)
"clickhouse_driver.bufferedreader.CompressedBufferedReader", /*tp_name*/
sizeof(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_traverse*/
__pyx_tp_clear_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_bufferedreader(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_bufferedreader},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"bufferedreader",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_BufferedReader, __pyx_k_BufferedReader, sizeof(__pyx_k_BufferedReader), 0, 0, 1, 1},
{&__pyx_n_s_BufferedSocketReader, __pyx_k_BufferedSocketReader, sizeof(__pyx_k_BufferedSocketReader), 0, 0, 1, 1},
{&__pyx_n_s_CompressedBufferedReader, __pyx_k_CompressedBufferedReader, sizeof(__pyx_k_CompressedBufferedReader), 0, 0, 1, 1},
{&__pyx_n_s_EOFError, __pyx_k_EOFError, sizeof(__pyx_k_EOFError), 0, 0, 1, 1},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0x18, __pyx_k_Incompatible_checksums_s_vs_0x18, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x18), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0x2a, __pyx_k_Incompatible_checksums_s_vs_0x2a, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x2a), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xef, __pyx_k_Incompatible_checksums_s_vs_0xef, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xef), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_kp_s_Unexpected_EOF_while_reading_byt, __pyx_k_Unexpected_EOF_while_reading_byt, sizeof(__pyx_k_Unexpected_EOF_while_reading_byt), 0, 0, 1, 0},
{&__pyx_n_s_UnicodeDecodeError, __pyx_k_UnicodeDecodeError, sizeof(__pyx_k_UnicodeDecodeError), 0, 0, 1, 1},
{&__pyx_n_s_bufsize, __pyx_k_bufsize, sizeof(__pyx_k_bufsize), 0, 0, 1, 1},
{&__pyx_n_s_clickhouse_driver_bufferedreader, __pyx_k_clickhouse_driver_bufferedreader, sizeof(__pyx_k_clickhouse_driver_bufferedreader), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_encoding, __pyx_k_encoding, sizeof(__pyx_k_encoding), 0, 0, 1, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_n_items, __pyx_k_n_items, sizeof(__pyx_k_n_items), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_BufferedReader, __pyx_k_pyx_unpickle_BufferedReader, sizeof(__pyx_k_pyx_unpickle_BufferedReader), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_BufferedSocketRea, __pyx_k_pyx_unpickle_BufferedSocketRea, sizeof(__pyx_k_pyx_unpickle_BufferedSocketRea), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_CompressedBuffere, __pyx_k_pyx_unpickle_CompressedBuffere, sizeof(__pyx_k_pyx_unpickle_CompressedBuffere), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_read_block, __pyx_k_read_block, sizeof(__pyx_k_read_block), 0, 0, 1, 1},
{&__pyx_n_s_read_into_buffer, __pyx_k_read_into_buffer, sizeof(__pyx_k_read_into_buffer), 0, 0, 1, 1},
{&__pyx_n_s_recv_into, __pyx_k_recv_into, sizeof(__pyx_k_recv_into), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_sock, __pyx_k_sock, sizeof(__pyx_k_sock), 0, 0, 1, 1},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_super, __pyx_k_super, sizeof(__pyx_k_super), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_kp_s_utf_8, __pyx_k_utf_8, sizeof(__pyx_k_utf_8), 0, 0, 1, 0},
{0, 0, 0, 0, 0, 0, 0}
};
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_super = __Pyx_GetBuiltinName(__pyx_n_s_super); if (!__pyx_builtin_super) __PYX_ERR(0, 20, __pyx_L1_error)
__pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 85, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 90, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 117, __pyx_L1_error)
__pyx_builtin_UnicodeDecodeError = __Pyx_GetBuiltinName(__pyx_n_s_UnicodeDecodeError); if (!__pyx_builtin_UnicodeDecodeError) __PYX_ERR(0, 168, __pyx_L1_error)
__pyx_builtin_EOFError = __Pyx_GetBuiltinName(__pyx_n_s_EOFError); if (!__pyx_builtin_EOFError) __PYX_ERR(0, 191, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "clickhouse_driver/bufferedreader.pyx":191
*
* if self.current_buffer_size == 0:
* raise EOFError('Unexpected EOF while reading bytes') # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Unexpected_EOF_while_reading_byt); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 191, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__2 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
__pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_BufferedReader, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(1, 1, __pyx_L1_error)
__pyx_tuple__4 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
__pyx_codeobj__5 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__4, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_BufferedSocketRea, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__5)) __PYX_ERR(1, 1, __pyx_L1_error)
__pyx_tuple__6 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
__pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_CompressedBuffere, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_25411819 = PyInt_FromLong(25411819L); if (unlikely(!__pyx_int_25411819)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_44607813 = PyInt_FromLong(44607813L); if (unlikely(!__pyx_int_44607813)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_251251440 = PyInt_FromLong(251251440L); if (unlikely(!__pyx_int_251251440)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
if (PyType_Ready(&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_dictoffset && __pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_BufferedReader, (PyObject *)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader = &__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader;
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_base = __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader;
if (PyType_Ready(&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader) < 0) __PYX_ERR(0, 180, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_dictoffset && __pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_BufferedSocketReader, (PyObject *)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader) < 0) __PYX_ERR(0, 180, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader) < 0) __PYX_ERR(0, 180, __pyx_L1_error)
__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader = &__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader;
__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_base = __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader;
if (PyType_Ready(&__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader) < 0) __PYX_ERR(0, 194, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_dictoffset && __pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_CompressedBufferedReader, (PyObject *)&__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader) < 0) __PYX_ERR(0, 194, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader) < 0) __PYX_ERR(0, 194, __pyx_L1_error)
__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader = &__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(3, 8, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(4, 15, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initbufferedreader(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initbufferedreader(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_bufferedreader(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_bufferedreader(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_bufferedreader(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'bufferedreader' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_bufferedreader(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("bufferedreader", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_clickhouse_driver__bufferedreader) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "clickhouse_driver.bufferedreader")) {
if (unlikely(PyDict_SetItemString(modules, "clickhouse_driver.bufferedreader", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader, NULL, __pyx_n_s_clickhouse_driver_bufferedreader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_BufferedReader, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader, NULL, __pyx_n_s_clickhouse_driver_bufferedreader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_BufferedSocketRea, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":1
* def __pyx_unpickle_CompressedBufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader, NULL, __pyx_n_s_clickhouse_driver_bufferedreader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_CompressedBuffere, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":1
* from cpython cimport Py_INCREF, PyBytes_FromStringAndSize # <<<<<<<<<<<<<<
* from cpython.bytearray cimport PyByteArray_AsString
* # Using python's versions of pure c memory management functions for
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init clickhouse_driver.bufferedreader", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init clickhouse_driver.bufferedreader");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments .*/
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
if (likely(PyCFunction_Check(func)))
#endif
{
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* GetItemIntByteArray */
static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i,
int wraparound, int boundscheck) {
Py_ssize_t length;
if (wraparound | boundscheck) {
length = PyByteArray_GET_SIZE(string);
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
} else {
PyErr_SetString(PyExc_IndexError, "bytearray index out of range");
return -1;
}
} else {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
}
}
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* CallNextTpTraverse */
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse) {
PyTypeObject* type = Py_TYPE(obj);
while (type && type->tp_traverse != current_tp_traverse)
type = type->tp_base;
while (type && type->tp_traverse == current_tp_traverse)
type = type->tp_base;
if (type && type->tp_traverse)
return type->tp_traverse(obj, v, a);
return 0;
}
/* CallNextTpClear */
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear) {
PyTypeObject* type = Py_TYPE(obj);
while (type && type->tp_clear != current_tp_clear)
type = type->tp_base;
while (type && type->tp_clear == current_tp_clear)
type = type->tp_base;
if (type && type->tp_clear)
type->tp_clear(obj);
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value) {
const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) ((unsigned PY_LONG_LONG) 0 - (unsigned PY_LONG_LONG) 1), const_zero = (unsigned PY_LONG_LONG) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(unsigned PY_LONG_LONG),
little, !is_unsigned);
}
}
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value) {
const unsigned char neg_one = (unsigned char) ((unsigned char) 0 - (unsigned char) 1), const_zero = (unsigned char) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(unsigned char) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(unsigned char) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(unsigned char) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(unsigned char),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_As_unsigned_PY_LONG_LONG(PyObject *x) {
const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) ((unsigned PY_LONG_LONG) 0 - (unsigned PY_LONG_LONG) 1), const_zero = (unsigned PY_LONG_LONG) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (unsigned PY_LONG_LONG) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (unsigned PY_LONG_LONG) 0;
case 1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, digit, digits[0])
case 2:
if (8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) >= 2 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) >= 3 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) >= 4 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (unsigned PY_LONG_LONG) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (unsigned PY_LONG_LONG) 0;
case -1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, digit, +digits[0])
case -2:
if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) ((((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) ((((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
return (unsigned PY_LONG_LONG) ((((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])));
}
}
break;
}
#endif
if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
unsigned PY_LONG_LONG val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (unsigned PY_LONG_LONG) -1;
}
} else {
unsigned PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (unsigned PY_LONG_LONG) -1;
val = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to unsigned PY_LONG_LONG");
return (unsigned PY_LONG_LONG) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned PY_LONG_LONG");
return (unsigned PY_LONG_LONG) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4403_1 |
crossvul-cpp_data_good_4587_3 | /*
* The Python Imaging Library.
* $Id$
*
* decoder for Sgi RLE data.
*
* history:
* 2017-07-28 mb fixed for images larger than 64KB
* 2017-07-20 mb created
*
* Copyright (c) Mickael Bonfill 2017.
*
* See the README file for information on usage and redistribution.
*/
#include "Imaging.h"
#include "Sgi.h"
#define SGI_HEADER_SIZE 512
#define RLE_COPY_FLAG 0x80
#define RLE_MAX_RUN 0x7f
static void read4B(UINT32* dest, UINT8* buf)
{
*dest = (UINT32)((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]);
}
static int expandrow(UINT8* dest, UINT8* src, int n, int z, int xsize)
{
UINT8 pixel, count;
for (;n > 0; n--)
{
pixel = *src++;
if (n == 1 && pixel != 0)
return n;
count = pixel & RLE_MAX_RUN;
if (!count)
return count;
if (count > xsize) {
return -1;
}
if (pixel & RLE_COPY_FLAG) {
while(count--) {
*dest = *src++;
dest += z;
}
}
else {
pixel = *src++;
while (count--) {
*dest = pixel;
dest += z;
}
}
}
return 0;
}
static int expandrow2(UINT8* dest, const UINT8* src, int n, int z, int xsize)
{
UINT8 pixel, count;
for (;n > 0; n--)
{
pixel = src[1];
src+=2;
if (n == 1 && pixel != 0)
return n;
count = pixel & RLE_MAX_RUN;
if (!count)
return count;
if (count > xsize) {
return -1;
}
if (pixel & RLE_COPY_FLAG) {
while(count--) {
memcpy(dest, src, 2);
src += 2;
dest += z * 2;
}
}
else {
while (count--) {
memcpy(dest, src, 2);
dest += z * 2;
}
src+=2;
}
}
return 0;
}
int
ImagingSgiRleDecode(Imaging im, ImagingCodecState state,
UINT8* buf, Py_ssize_t bytes)
{
UINT8 *ptr;
SGISTATE *c;
int err = 0;
int status;
/* Get all data from File descriptor */
c = (SGISTATE*)state->context;
_imaging_seek_pyFd(state->fd, 0L, SEEK_END);
c->bufsize = _imaging_tell_pyFd(state->fd);
c->bufsize -= SGI_HEADER_SIZE;
ptr = malloc(sizeof(UINT8) * c->bufsize);
if (!ptr) {
return IMAGING_CODEC_MEMORY;
}
_imaging_seek_pyFd(state->fd, SGI_HEADER_SIZE, SEEK_SET);
_imaging_read_pyFd(state->fd, (char*)ptr, c->bufsize);
/* decoder initialization */
state->count = 0;
state->y = 0;
if (state->ystep < 0) {
state->y = im->ysize - 1;
} else {
state->ystep = 1;
}
if (im->xsize > INT_MAX / im->bands ||
im->ysize > INT_MAX / im->bands) {
err = IMAGING_CODEC_MEMORY;
goto sgi_finish_decode;
}
/* Allocate memory for RLE tables and rows */
free(state->buffer);
state->buffer = NULL;
/* malloc overflow check above */
state->buffer = calloc(im->xsize * im->bands, sizeof(UINT8) * 2);
c->tablen = im->bands * im->ysize;
c->starttab = calloc(c->tablen, sizeof(UINT32));
c->lengthtab = calloc(c->tablen, sizeof(UINT32));
if (!state->buffer ||
!c->starttab ||
!c->lengthtab) {
err = IMAGING_CODEC_MEMORY;
goto sgi_finish_decode;
}
/* populate offsets table */
for (c->tabindex = 0, c->bufindex = 0; c->tabindex < c->tablen; c->tabindex++, c->bufindex+=4)
read4B(&c->starttab[c->tabindex], &ptr[c->bufindex]);
/* populate lengths table */
for (c->tabindex = 0, c->bufindex = c->tablen * sizeof(UINT32); c->tabindex < c->tablen; c->tabindex++, c->bufindex+=4)
read4B(&c->lengthtab[c->tabindex], &ptr[c->bufindex]);
state->count += c->tablen * sizeof(UINT32) * 2;
/* read compressed rows */
for (c->rowno = 0; c->rowno < im->ysize; c->rowno++, state->y += state->ystep)
{
for (c->channo = 0; c->channo < im->bands; c->channo++)
{
c->rleoffset = c->starttab[c->rowno + c->channo * im->ysize];
c->rlelength = c->lengthtab[c->rowno + c->channo * im->ysize];
c->rleoffset -= SGI_HEADER_SIZE;
if (c->rleoffset + c->rlelength > c->bufsize) {
state->errcode = IMAGING_CODEC_OVERRUN;
return -1;
}
/* row decompression */
if (c->bpc ==1) {
status = expandrow(&state->buffer[c->channo], &ptr[c->rleoffset], c->rlelength, im->bands, im->xsize);
}
else {
status = expandrow2(&state->buffer[c->channo * 2], &ptr[c->rleoffset], c->rlelength, im->bands, im->xsize);
}
if (status == -1) {
state->errcode = IMAGING_CODEC_OVERRUN;
return -1;
} else if (status == 1) {
goto sgi_finish_decode;
}
state->count += c->rlelength;
}
/* store decompressed data in image */
state->shuffle((UINT8*)im->image[state->y], state->buffer, im->xsize);
}
c->bufsize++;
sgi_finish_decode: ;
free(c->starttab);
free(c->lengthtab);
free(ptr);
if (err != 0){
return err;
}
return state->count - c->bufsize;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4587_3 |
crossvul-cpp_data_bad_2396_0 | /*****************************************************************************
* schroedinger.c: Dirac decoder module making use of libschroedinger.
* (http://www.bbc.co.uk/rd/projects/dirac/index.shtml)
* (http://diracvideo.org)
*****************************************************************************
* Copyright (C) 2008-2011 VLC authors and VideoLAN
*
* Authors: Jonathan Rosser <jonathan.rosser@gmail.com>
* David Flynn <davidf at rd dot bbc.co.uk>
* Anuradha Suraparaju <asuraparaju at gmail dot com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <assert.h>
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_codec.h>
#include <schroedinger/schro.h>
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int OpenDecoder ( vlc_object_t * );
static void CloseDecoder ( vlc_object_t * );
static int OpenEncoder ( vlc_object_t * );
static void CloseEncoder ( vlc_object_t * );
#define ENC_CFG_PREFIX "sout-schro-"
#define ENC_CHROMAFMT "chroma-fmt"
#define ENC_CHROMAFMT_TEXT N_("Chroma format")
#define ENC_CHROMAFMT_LONGTEXT N_("Picking chroma format will force a " \
"conversion of the video into that format")
static const char *const enc_chromafmt_list[] =
{ "420", "422", "444" };
static const char *const enc_chromafmt_list_text[] =
{ N_("4:2:0"), N_("4:2:2"), N_("4:4:4") };
#define ENC_RATE_CONTROL "rate-control"
#define ENC_RATE_CONTROL_TEXT N_("Rate control method")
#define ENC_RATE_CONTROL_LONGTEXT N_("Method used to encode the video sequence")
static const char *enc_rate_control_list[] = {
"constant_noise_threshold",
"constant_bitrate",
"low_delay",
"lossless",
"constant_lambda",
"constant_error",
"constant_quality"
};
static const char *enc_rate_control_list_text[] = {
N_("Constant noise threshold mode"),
N_("Constant bitrate mode (CBR)"),
N_("Low Delay mode"),
N_("Lossless mode"),
N_("Constant lambda mode"),
N_("Constant error mode"),
N_("Constant quality mode")
};
#define ENC_GOP_STRUCTURE "gop-structure"
#define ENC_GOP_STRUCTURE_TEXT N_("GOP structure")
#define ENC_GOP_STRUCTURE_LONGTEXT N_("GOP structure used to encode the video sequence")
static const char *enc_gop_structure_list[] = {
"adaptive",
"intra_only",
"backref",
"chained_backref",
"biref",
"chained_biref"
};
static const char *enc_gop_structure_list_text[] = {
N_("No fixed gop structure. A picture can be intra or inter and refer to previous or future pictures."),
N_("I-frame only sequence"),
N_("Inter pictures refere to previous pictures only"),
N_("Inter pictures refere to previous pictures only"),
N_("Inter pictures can refer to previous or future pictures"),
N_("Inter pictures can refer to previous or future pictures")
};
#define ENC_QUALITY "quality"
#define ENC_QUALITY_TEXT N_("Constant quality factor")
#define ENC_QUALITY_LONGTEXT N_("Quality factor to use in constant quality mode")
#define ENC_NOISE_THRESHOLD "noise-threshold"
#define ENC_NOISE_THRESHOLD_TEXT N_("Noise Threshold")
#define ENC_NOISE_THRESHOLD_LONGTEXT N_("Noise threshold to use in constant noise threshold mode")
#define ENC_BITRATE "bitrate"
#define ENC_BITRATE_TEXT N_("CBR bitrate (kbps)")
#define ENC_BITRATE_LONGTEXT N_("Target bitrate in kbps when encoding in constant bitrate mode")
#define ENC_MAX_BITRATE "max-bitrate"
#define ENC_MAX_BITRATE_TEXT N_("Maximum bitrate (kbps)")
#define ENC_MAX_BITRATE_LONGTEXT N_("Maximum bitrate in kbps when encoding in constant bitrate mode")
#define ENC_MIN_BITRATE "min-bitrate"
#define ENC_MIN_BITRATE_TEXT N_("Minimum bitrate (kbps)")
#define ENC_MIN_BITRATE_LONGTEXT N_("Minimum bitrate in kbps when encoding in constant bitrate mode")
#define ENC_AU_DISTANCE "gop-length"
#define ENC_AU_DISTANCE_TEXT N_("GOP length")
#define ENC_AU_DISTANCE_LONGTEXT N_("Number of pictures between successive sequence headers i.e. length of the group of pictures")
#define ENC_PREFILTER "filtering"
#define ENC_PREFILTER_TEXT N_("Prefilter")
#define ENC_PREFILTER_LONGTEXT N_("Enable adaptive prefiltering")
static const char *enc_filtering_list[] = {
"none",
"center_weighted_median",
"gaussian",
"add_noise",
"adaptive_gaussian",
"lowpass"
};
static const char *enc_filtering_list_text[] = {
N_("No pre-filtering"),
N_("Centre Weighted Median"),
N_("Gaussian Low Pass Filter"),
N_("Add Noise"),
N_("Gaussian Adaptive Low Pass Filter"),
N_("Low Pass Filter"),
};
#define ENC_PREFILTER_STRENGTH "filter-value"
#define ENC_PREFILTER_STRENGTH_TEXT N_("Amount of prefiltering")
#define ENC_PREFILTER_STRENGTH_LONGTEXT N_("Higher value implies more prefiltering")
#define ENC_CODINGMODE "coding-mode"
#define ENC_CODINGMODE_TEXT N_("Picture coding mode")
#define ENC_CODINGMODE_LONGTEXT N_("Field coding is where interlaced fields are coded" \
" separately as opposed to a pseudo-progressive frame")
static const char *const enc_codingmode_list[] =
{ "auto", "progressive", "field" };
static const char *const enc_codingmode_list_text[] =
{ N_("auto - let encoder decide based upon input (Best)"),
N_("force coding frame as single picture"),
N_("force coding frame as separate interlaced fields"),
};
/* advanced option only */
#define ENC_MCBLK_SIZE "motion-block-size"
#define ENC_MCBLK_SIZE_TEXT N_("Size of motion compensation blocks")
static const char *enc_block_size_list[] = {
"automatic",
"small",
"medium",
"large"
};
static const char *const enc_block_size_list_text[] =
{ N_("automatic - let encoder decide based upon input (Best)"),
N_("small - use small motion compensation blocks"),
N_("medium - use medium motion compensation blocks"),
N_("large - use large motion compensation blocks"),
};
/* advanced option only */
#define ENC_MCBLK_OVERLAP "motion-block-overlap"
#define ENC_MCBLK_OVERLAP_TEXT N_("Overlap of motion compensation blocks")
static const char *enc_block_overlap_list[] = {
"automatic",
"none",
"partial",
"full"
};
static const char *const enc_block_overlap_list_text[] =
{ N_("automatic - let encoder decide based upon input (Best)"),
N_("none - Motion compensation blocks do not overlap"),
N_("partial - Motion compensation blocks only partially overlap"),
N_("full - Motion compensation blocks fully overlap"),
};
#define ENC_MVPREC "mv-precision"
#define ENC_MVPREC_TEXT N_("Motion Vector precision")
#define ENC_MVPREC_LONGTEXT N_("Motion Vector precision in pels")
static const char *const enc_mvprec_list[] =
{ "1", "1/2", "1/4", "1/8" };
/* advanced option only */
#define ENC_ME_COMBINED "me-combined"
#define ENC_ME_COMBINED_TEXT N_("Three component motion estimation")
#define ENC_ME_COMBINED_LONGTEXT N_("Use chroma as part of the motion estimation process")
#define ENC_DWTINTRA "intra-wavelet"
#define ENC_DWTINTRA_TEXT N_("Intra picture DWT filter")
#define ENC_DWTINTER "inter-wavelet"
#define ENC_DWTINTER_TEXT N_("Inter picture DWT filter")
static const char *enc_wavelet_list[] = {
"desl_dubuc_9_7",
"le_gall_5_3",
"desl_dubuc_13_7",
"haar_0",
"haar_1",
"fidelity",
"daub_9_7"
};
static const char *enc_wavelet_list_text[] = {
"Deslauriers-Dubuc (9,7)",
"LeGall (5,3)",
"Deslauriers-Dubuc (13,7)",
"Haar with no shift",
"Haar with single shift per level",
"Fidelity filter",
"Daubechies (9,7) integer approximation"
};
#define ENC_DWTDEPTH "transform-depth"
#define ENC_DWTDEPTH_TEXT N_("Number of DWT iterations")
#define ENC_DWTDEPTH_LONGTEXT N_("Also known as DWT levels")
/* advanced option only */
#define ENC_MULTIQUANT "enable-multiquant"
#define ENC_MULTIQUANT_TEXT N_("Enable multiple quantizers")
#define ENC_MULTIQUANT_LONGTEXT N_("Enable multiple quantizers per subband (one per codeblock)")
/* advanced option only */
#define ENC_NOAC "enable-noarith"
#define ENC_NOAC_TEXT N_("Disable arithmetic coding")
#define ENC_NOAC_LONGTEXT N_("Use variable length codes instead, useful for very high bitrates")
/* visual modelling */
/* advanced option only */
#define ENC_PWT "perceptual-weighting"
#define ENC_PWT_TEXT N_("perceptual weighting method")
static const char *enc_perceptual_weighting_list[] = {
"none",
"ccir959",
"moo",
"manos_sakrison"
};
/* advanced option only */
#define ENC_PDIST "perceptual-distance"
#define ENC_PDIST_TEXT N_("perceptual distance")
#define ENC_PDIST_LONGTEXT N_("perceptual distance to calculate perceptual weight")
/* advanced option only */
#define ENC_HSLICES "horiz-slices"
#define ENC_HSLICES_TEXT N_("Horizontal slices per frame")
#define ENC_HSLICES_LONGTEXT N_("Number of horizontal slices per frame in low delay mode")
/* advanced option only */
#define ENC_VSLICES "vert-slices"
#define ENC_VSLICES_TEXT N_("Vertical slices per frame")
#define ENC_VSLICES_LONGTEXT N_("Number of vertical slices per frame in low delay mode")
/* advanced option only */
#define ENC_SCBLK_SIZE "codeblock-size"
#define ENC_SCBLK_SIZE_TEXT N_("Size of code blocks in each subband")
static const char *enc_codeblock_size_list[] = {
"automatic",
"small",
"medium",
"large",
"full"
};
static const char *const enc_codeblock_size_list_text[] =
{ N_("automatic - let encoder decide based upon input (Best)"),
N_("small - use small code blocks"),
N_("medium - use medium sized code blocks"),
N_("large - use large code blocks"),
N_("full - One code block per subband"),
};
/* advanced option only */
#define ENC_ME_HIERARCHICAL "enable-hierarchical-me"
#define ENC_ME_HIERARCHICAL_TEXT N_("Enable hierarchical Motion Estimation")
/* advanced option only */
#define ENC_ME_DOWNSAMPLE_LEVELS "downsample-levels"
#define ENC_ME_DOWNSAMPLE_LEVELS_TEXT N_("Number of levels of downsampling")
#define ENC_ME_DOWNSAMPLE_LEVELS_LONGTEXT N_("Number of levels of downsampling in hierarchical motion estimation mode")
/* advanced option only */
#define ENC_ME_GLOBAL_MOTION "enable-global-me"
#define ENC_ME_GLOBAL_MOTION_TEXT N_("Enable Global Motion Estimation")
/* advanced option only */
#define ENC_ME_PHASECORR "enable-phasecorr-me"
#define ENC_ME_PHASECORR_TEXT N_("Enable Phase Correlation Estimation")
/* advanced option only */
#define ENC_SCD "enable-scd"
#define ENC_SCD_TEXT N_("Enable Scene Change Detection")
/* advanced option only */
#define ENC_FORCE_PROFILE "force-profile"
#define ENC_FORCE_PROFILE_TEXT N_("Force Profile")
static const char *enc_profile_list[] = {
"auto",
"vc2_low_delay",
"vc2_simple",
"vc2_main",
"main"
};
static const char *const enc_profile_list_text[] =
{ N_("automatic - let encoder decide based upon input (Best)"),
N_("VC2 Low Delay Profile"),
N_("VC2 Simple Profile"),
N_("VC2 Main Profile"),
N_("Main Profile"),
};
static const char *const ppsz_enc_options[] = {
ENC_RATE_CONTROL, ENC_GOP_STRUCTURE, ENC_QUALITY, ENC_NOISE_THRESHOLD, ENC_BITRATE,
ENC_MIN_BITRATE, ENC_MAX_BITRATE, ENC_AU_DISTANCE, ENC_CHROMAFMT,
ENC_PREFILTER, ENC_PREFILTER_STRENGTH, ENC_CODINGMODE, ENC_MCBLK_SIZE,
ENC_MCBLK_OVERLAP, ENC_MVPREC, ENC_ME_COMBINED, ENC_DWTINTRA, ENC_DWTINTER,
ENC_DWTDEPTH, ENC_MULTIQUANT, ENC_NOAC, ENC_PWT, ENC_PDIST, ENC_HSLICES,
ENC_VSLICES, ENC_SCBLK_SIZE, ENC_ME_HIERARCHICAL, ENC_ME_DOWNSAMPLE_LEVELS,
ENC_ME_GLOBAL_MOTION, ENC_ME_PHASECORR, ENC_SCD, ENC_FORCE_PROFILE,
NULL
};
/* Module declaration */
vlc_module_begin ()
set_category( CAT_INPUT )
set_subcategory( SUBCAT_INPUT_VCODEC )
set_shortname( "Schroedinger" )
set_description( N_("Dirac video decoder using libschroedinger") )
set_capability( "decoder", 200 )
set_callbacks( OpenDecoder, CloseDecoder )
add_shortcut( "schroedinger" )
/* encoder */
add_submodule()
set_section( N_("Encoding") , NULL )
set_description( N_("Dirac video encoder using libschroedinger") )
set_capability( "encoder", 110 )
set_callbacks( OpenEncoder, CloseEncoder )
add_shortcut( "schroedinger", "schro" )
add_string( ENC_CFG_PREFIX ENC_RATE_CONTROL, NULL,
ENC_RATE_CONTROL_TEXT, ENC_RATE_CONTROL_LONGTEXT, false )
change_string_list( enc_rate_control_list, enc_rate_control_list_text )
add_float( ENC_CFG_PREFIX ENC_QUALITY, -1.,
ENC_QUALITY_TEXT, ENC_QUALITY_LONGTEXT, false )
change_float_range(-1., 10.);
add_float( ENC_CFG_PREFIX ENC_NOISE_THRESHOLD, -1.,
ENC_NOISE_THRESHOLD_TEXT, ENC_NOISE_THRESHOLD_LONGTEXT, false )
change_float_range(-1., 100.);
add_integer( ENC_CFG_PREFIX ENC_BITRATE, -1,
ENC_BITRATE_TEXT, ENC_BITRATE_LONGTEXT, false )
change_integer_range(-1, INT_MAX);
add_integer( ENC_CFG_PREFIX ENC_MAX_BITRATE, -1,
ENC_MAX_BITRATE_TEXT, ENC_MAX_BITRATE_LONGTEXT, false )
change_integer_range(-1, INT_MAX);
add_integer( ENC_CFG_PREFIX ENC_MIN_BITRATE, -1,
ENC_MIN_BITRATE_TEXT, ENC_MIN_BITRATE_LONGTEXT, false )
change_integer_range(-1, INT_MAX);
add_string( ENC_CFG_PREFIX ENC_GOP_STRUCTURE, NULL,
ENC_GOP_STRUCTURE_TEXT, ENC_GOP_STRUCTURE_LONGTEXT, false )
change_string_list( enc_gop_structure_list, enc_gop_structure_list_text )
add_integer( ENC_CFG_PREFIX ENC_AU_DISTANCE, -1,
ENC_AU_DISTANCE_TEXT, ENC_AU_DISTANCE_LONGTEXT, false )
change_integer_range(-1, INT_MAX);
add_string( ENC_CFG_PREFIX ENC_CHROMAFMT, "420",
ENC_CHROMAFMT_TEXT, ENC_CHROMAFMT_LONGTEXT, false )
change_string_list( enc_chromafmt_list, enc_chromafmt_list_text )
add_string( ENC_CFG_PREFIX ENC_CODINGMODE, "auto",
ENC_CODINGMODE_TEXT, ENC_CODINGMODE_LONGTEXT, false )
change_string_list( enc_codingmode_list, enc_codingmode_list_text )
add_string( ENC_CFG_PREFIX ENC_MVPREC, NULL,
ENC_MVPREC_TEXT, ENC_MVPREC_LONGTEXT, false )
change_string_list( enc_mvprec_list, enc_mvprec_list )
/* advanced option only */
add_string( ENC_CFG_PREFIX ENC_MCBLK_SIZE, NULL,
ENC_MCBLK_SIZE_TEXT, ENC_MCBLK_SIZE_TEXT, true )
change_string_list( enc_block_size_list, enc_block_size_list_text )
/* advanced option only */
add_string( ENC_CFG_PREFIX ENC_MCBLK_OVERLAP, NULL,
ENC_MCBLK_OVERLAP_TEXT, ENC_MCBLK_OVERLAP_TEXT, true )
change_string_list( enc_block_overlap_list, enc_block_overlap_list_text )
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_ME_COMBINED, -1,
ENC_ME_COMBINED_TEXT, ENC_ME_COMBINED_LONGTEXT, true )
change_integer_range(-1, 1 );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_ME_HIERARCHICAL, -1,
ENC_ME_HIERARCHICAL_TEXT, ENC_ME_HIERARCHICAL_TEXT, true )
change_integer_range(-1, 1 );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_ME_DOWNSAMPLE_LEVELS, -1,
ENC_ME_DOWNSAMPLE_LEVELS_TEXT, ENC_ME_DOWNSAMPLE_LEVELS_LONGTEXT, true )
change_integer_range(-1, 8 );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_ME_GLOBAL_MOTION, -1,
ENC_ME_GLOBAL_MOTION_TEXT, ENC_ME_GLOBAL_MOTION_TEXT, true )
change_integer_range(-1, 1 );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_ME_PHASECORR, -1,
ENC_ME_PHASECORR_TEXT, ENC_ME_PHASECORR_TEXT, true )
change_integer_range(-1, 1 );
add_string( ENC_CFG_PREFIX ENC_DWTINTRA, NULL,
ENC_DWTINTRA_TEXT, ENC_DWTINTRA_TEXT, false )
change_string_list( enc_wavelet_list, enc_wavelet_list_text )
add_string( ENC_CFG_PREFIX ENC_DWTINTER, NULL,
ENC_DWTINTER_TEXT, ENC_DWTINTER_TEXT, false )
change_string_list( enc_wavelet_list, enc_wavelet_list_text )
add_integer( ENC_CFG_PREFIX ENC_DWTDEPTH, -1,
ENC_DWTDEPTH_TEXT, ENC_DWTDEPTH_LONGTEXT, false )
change_integer_range(-1, SCHRO_LIMIT_ENCODER_TRANSFORM_DEPTH );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_MULTIQUANT, -1,
ENC_MULTIQUANT_TEXT, ENC_MULTIQUANT_LONGTEXT, true )
change_integer_range(-1, 1 );
/* advanced option only */
add_string( ENC_CFG_PREFIX ENC_SCBLK_SIZE, NULL,
ENC_SCBLK_SIZE_TEXT, ENC_SCBLK_SIZE_TEXT, true )
change_string_list( enc_codeblock_size_list, enc_codeblock_size_list_text )
add_string( ENC_CFG_PREFIX ENC_PREFILTER, NULL,
ENC_PREFILTER_TEXT, ENC_PREFILTER_LONGTEXT, false )
change_string_list( enc_filtering_list, enc_filtering_list_text )
add_float( ENC_CFG_PREFIX ENC_PREFILTER_STRENGTH, -1.,
ENC_PREFILTER_STRENGTH_TEXT, ENC_PREFILTER_STRENGTH_LONGTEXT, false )
change_float_range(-1., 100.0);
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_SCD, -1,
ENC_SCD_TEXT, ENC_SCD_TEXT, true )
change_integer_range(-1, 1 );
/* advanced option only */
add_string( ENC_CFG_PREFIX ENC_PWT, NULL,
ENC_PWT_TEXT, ENC_PWT_TEXT, true )
change_string_list( enc_perceptual_weighting_list, enc_perceptual_weighting_list )
/* advanced option only */
add_float( ENC_CFG_PREFIX ENC_PDIST, -1,
ENC_PDIST_TEXT, ENC_PDIST_LONGTEXT, true )
change_float_range(-1., 100.);
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_NOAC, -1,
ENC_NOAC_TEXT, ENC_NOAC_LONGTEXT, true )
change_integer_range(-1, 1 );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_HSLICES, -1,
ENC_HSLICES_TEXT, ENC_HSLICES_LONGTEXT, true )
change_integer_range(-1, INT_MAX );
/* advanced option only */
add_integer( ENC_CFG_PREFIX ENC_VSLICES, -1,
ENC_VSLICES_TEXT, ENC_VSLICES_LONGTEXT, true )
change_integer_range(-1, INT_MAX );
/* advanced option only */
add_string( ENC_CFG_PREFIX ENC_FORCE_PROFILE, NULL,
ENC_FORCE_PROFILE_TEXT, ENC_FORCE_PROFILE_TEXT, true )
change_string_list( enc_profile_list, enc_profile_list_text )
vlc_module_end ()
/*****************************************************************************
* Local prototypes
*****************************************************************************/
static picture_t *DecodeBlock ( decoder_t *p_dec, block_t **pp_block );
struct picture_free_t
{
picture_t *p_pic;
decoder_t *p_dec;
};
/*****************************************************************************
* decoder_sys_t : Schroedinger decoder descriptor
*****************************************************************************/
struct decoder_sys_t
{
/*
* Dirac properties
*/
mtime_t i_lastpts;
mtime_t i_frame_pts_delta;
SchroDecoder *p_schro;
SchroVideoFormat *p_format;
};
/*****************************************************************************
* OpenDecoder: probe the decoder and return score
*****************************************************************************/
static int OpenDecoder( vlc_object_t *p_this )
{
decoder_t *p_dec = (decoder_t*)p_this;
decoder_sys_t *p_sys;
SchroDecoder *p_schro;
if( p_dec->fmt_in.i_codec != VLC_CODEC_DIRAC )
{
return VLC_EGENERIC;
}
/* Allocate the memory needed to store the decoder's structure */
p_sys = malloc(sizeof(decoder_sys_t));
if( p_sys == NULL )
return VLC_ENOMEM;
/* Initialise the schroedinger (and hence liboil libraries */
/* This does no allocation and is safe to call */
schro_init();
/* Initialise the schroedinger decoder */
if( !(p_schro = schro_decoder_new()) )
{
free( p_sys );
return VLC_EGENERIC;
}
p_dec->p_sys = p_sys;
p_sys->p_schro = p_schro;
p_sys->p_format = NULL;
p_sys->i_lastpts = VLC_TS_INVALID;
p_sys->i_frame_pts_delta = 0;
/* Set output properties */
p_dec->fmt_out.i_cat = VIDEO_ES;
p_dec->fmt_out.i_codec = VLC_CODEC_I420;
/* Set callbacks */
p_dec->pf_decode_video = DecodeBlock;
return VLC_SUCCESS;
}
/*****************************************************************************
* SetPictureFormat: Set the decoded picture params to the ones from the stream
*****************************************************************************/
static void SetVideoFormat( decoder_t *p_dec )
{
decoder_sys_t *p_sys = p_dec->p_sys;
p_sys->p_format = schro_decoder_get_video_format(p_sys->p_schro);
if( p_sys->p_format == NULL ) return;
p_sys->i_frame_pts_delta = CLOCK_FREQ
* p_sys->p_format->frame_rate_denominator
/ p_sys->p_format->frame_rate_numerator;
switch( p_sys->p_format->chroma_format )
{
case SCHRO_CHROMA_420: p_dec->fmt_out.i_codec = VLC_CODEC_I420; break;
case SCHRO_CHROMA_422: p_dec->fmt_out.i_codec = VLC_CODEC_I422; break;
case SCHRO_CHROMA_444: p_dec->fmt_out.i_codec = VLC_CODEC_I444; break;
default:
p_dec->fmt_out.i_codec = 0;
break;
}
p_dec->fmt_out.video.i_visible_width = p_sys->p_format->clean_width;
p_dec->fmt_out.video.i_x_offset = p_sys->p_format->left_offset;
p_dec->fmt_out.video.i_width = p_sys->p_format->width;
p_dec->fmt_out.video.i_visible_height = p_sys->p_format->clean_height;
p_dec->fmt_out.video.i_y_offset = p_sys->p_format->top_offset;
p_dec->fmt_out.video.i_height = p_sys->p_format->height;
/* aspect_ratio_[numerator|denominator] describes the pixel aspect ratio */
p_dec->fmt_out.video.i_sar_num = p_sys->p_format->aspect_ratio_numerator;
p_dec->fmt_out.video.i_sar_den = p_sys->p_format->aspect_ratio_denominator;
p_dec->fmt_out.video.i_frame_rate =
p_sys->p_format->frame_rate_numerator;
p_dec->fmt_out.video.i_frame_rate_base =
p_sys->p_format->frame_rate_denominator;
}
/*****************************************************************************
* SchroFrameFree: schro_frame callback to release the associated picture_t
* When schro_decoder_reset() is called there will be pictures in the
* decoding pipeline that need to be released rather than displayed.
*****************************************************************************/
static void SchroFrameFree( SchroFrame *frame, void *priv)
{
struct picture_free_t *p_free = priv;
if( !p_free )
return;
picture_Release( p_free->p_pic );
free(p_free);
(void)frame;
}
/*****************************************************************************
* CreateSchroFrameFromPic: wrap a picture_t in a SchroFrame
*****************************************************************************/
static SchroFrame *CreateSchroFrameFromPic( decoder_t *p_dec )
{
decoder_sys_t *p_sys = p_dec->p_sys;
SchroFrame *p_schroframe = schro_frame_new();
picture_t *p_pic = NULL;
struct picture_free_t *p_free;
if( !p_schroframe )
return NULL;
p_pic = decoder_NewPicture( p_dec );
if( !p_pic )
return NULL;
p_schroframe->format = SCHRO_FRAME_FORMAT_U8_420;
if( p_sys->p_format->chroma_format == SCHRO_CHROMA_422 )
{
p_schroframe->format = SCHRO_FRAME_FORMAT_U8_422;
}
else if( p_sys->p_format->chroma_format == SCHRO_CHROMA_444 )
{
p_schroframe->format = SCHRO_FRAME_FORMAT_U8_444;
}
p_schroframe->width = p_sys->p_format->width;
p_schroframe->height = p_sys->p_format->height;
p_free = malloc( sizeof( *p_free ) );
p_free->p_pic = p_pic;
p_free->p_dec = p_dec;
schro_frame_set_free_callback( p_schroframe, SchroFrameFree, p_free );
for( int i=0; i<3; i++ )
{
p_schroframe->components[i].width = p_pic->p[i].i_visible_pitch;
p_schroframe->components[i].stride = p_pic->p[i].i_pitch;
p_schroframe->components[i].height = p_pic->p[i].i_visible_lines;
p_schroframe->components[i].length =
p_pic->p[i].i_pitch * p_pic->p[i].i_lines;
p_schroframe->components[i].data = p_pic->p[i].p_pixels;
if(i!=0)
{
p_schroframe->components[i].v_shift =
SCHRO_FRAME_FORMAT_V_SHIFT( p_schroframe->format );
p_schroframe->components[i].h_shift =
SCHRO_FRAME_FORMAT_H_SHIFT( p_schroframe->format );
}
}
p_pic->b_progressive = !p_sys->p_format->interlaced;
p_pic->b_top_field_first = p_sys->p_format->top_field_first;
p_pic->i_nb_fields = 2;
return p_schroframe;
}
/*****************************************************************************
* SchroBufferFree: schro_buffer callback to release the associated block_t
*****************************************************************************/
static void SchroBufferFree( SchroBuffer *buf, void *priv )
{
block_t *p_block = priv;
if( !p_block )
return;
block_Release( p_block );
(void)buf;
}
/*****************************************************************************
* CloseDecoder: decoder destruction
*****************************************************************************/
static void CloseDecoder( vlc_object_t *p_this )
{
decoder_t *p_dec = (decoder_t *)p_this;
decoder_sys_t *p_sys = p_dec->p_sys;
schro_decoder_free( p_sys->p_schro );
free( p_sys );
}
/****************************************************************************
* DecodeBlock: the whole thing
****************************************************************************
* Blocks need not be Dirac dataunit aligned.
* If a block has a PTS signaled, it applies to the first picture at or after p_block
*
* If this function returns a picture (!NULL), it is called again and the
* same block is resubmitted. To avoid this, set *pp_block to NULL;
* If this function returns NULL, the *pp_block is lost (and leaked).
* This function must free all blocks when finished with them.
****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
decoder_sys_t *p_sys = p_dec->p_sys;
if( !pp_block ) return NULL;
if ( *pp_block ) {
block_t *p_block = *pp_block;
/* reset the decoder when seeking as the decode in progress is invalid */
/* discard the block as it is just a null magic block */
if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY ) {
schro_decoder_reset( p_sys->p_schro );
p_sys->i_lastpts = VLC_TS_INVALID;
block_Release( p_block );
*pp_block = NULL;
return NULL;
}
SchroBuffer *p_schrobuffer;
p_schrobuffer = schro_buffer_new_with_data( p_block->p_buffer, p_block->i_buffer );
p_schrobuffer->free = SchroBufferFree;
p_schrobuffer->priv = p_block;
if( p_block->i_pts > VLC_TS_INVALID ) {
mtime_t *p_pts = malloc( sizeof(*p_pts) );
if( p_pts ) {
*p_pts = p_block->i_pts;
/* if this call fails, p_pts is freed automatically */
p_schrobuffer->tag = schro_tag_new( p_pts, free );
}
}
/* this stops the same block being fed back into this function if
* we were on the next iteration of this loop to output a picture */
*pp_block = NULL;
schro_decoder_autoparse_push( p_sys->p_schro, p_schrobuffer );
/* DO NOT refer to p_block after this point, it may have been freed */
}
while( 1 )
{
SchroFrame *p_schroframe;
picture_t *p_pic;
int state = schro_decoder_autoparse_wait( p_sys->p_schro );
switch( state )
{
case SCHRO_DECODER_FIRST_ACCESS_UNIT:
SetVideoFormat( p_dec );
break;
case SCHRO_DECODER_NEED_BITS:
return NULL;
case SCHRO_DECODER_NEED_FRAME:
p_schroframe = CreateSchroFrameFromPic( p_dec );
if( !p_schroframe )
{
msg_Err( p_dec, "Could not allocate picture for decoder");
return NULL;
}
schro_decoder_add_output_picture( p_sys->p_schro, p_schroframe);
break;
case SCHRO_DECODER_OK: {
SchroTag *p_tag = schro_decoder_get_picture_tag( p_sys->p_schro );
p_schroframe = schro_decoder_pull( p_sys->p_schro );
if( !p_schroframe || !p_schroframe->priv )
{
/* frame can't be one that was allocated by us
* -- no private data: discard */
if( p_tag ) schro_tag_free( p_tag );
if( p_schroframe ) schro_frame_unref( p_schroframe );
break;
}
p_pic = ((struct picture_free_t*) p_schroframe->priv)->p_pic;
p_schroframe->priv = NULL;
if( p_tag )
{
/* free is handled by schro_frame_unref */
p_pic->date = *(mtime_t*) p_tag->value;
schro_tag_free( p_tag );
}
else if( p_sys->i_lastpts > VLC_TS_INVALID )
{
/* NB, this shouldn't happen since the packetizer does a
* very thorough job of inventing timestamps. The
* following is just a very rough fall back incase packetizer
* is missing. */
/* maybe it would be better to set p_pic->b_force ? */
p_pic->date = p_sys->i_lastpts + p_sys->i_frame_pts_delta;
}
p_sys->i_lastpts = p_pic->date;
schro_frame_unref( p_schroframe );
return p_pic;
}
case SCHRO_DECODER_EOS:
/* NB, the new api will not emit _EOS, it handles the reset internally */
break;
case SCHRO_DECODER_ERROR:
msg_Err( p_dec, "SCHRO_DECODER_ERROR");
return NULL;
}
}
}
/*****************************************************************************
* Local prototypes
*****************************************************************************/
static block_t *Encode( encoder_t *p_enc, picture_t *p_pict );
/*****************************************************************************
* picture_pts_t : store pts alongside picture number, not carried through
* encoder
*****************************************************************************/
struct picture_pts_t
{
mtime_t i_pts; /* associated pts */
uint32_t u_pnum; /* dirac picture number */
bool b_empty; /* entry is invalid */
};
/*****************************************************************************
* encoder_sys_t : Schroedinger encoder descriptor
*****************************************************************************/
#define SCHRO_PTS_TLB_SIZE 256
struct encoder_sys_t
{
/*
* Schro properties
*/
SchroEncoder *p_schro;
SchroVideoFormat *p_format;
int started;
bool b_auto_field_coding;
uint32_t i_input_picnum;
block_fifo_t *p_dts_fifo;
block_t *p_chain;
struct picture_pts_t pts_tlb[SCHRO_PTS_TLB_SIZE];
mtime_t i_pts_offset;
mtime_t i_field_time;
bool b_eos_signalled;
bool b_eos_pulled;
};
static struct
{
unsigned int i_height;
int i_approx_fps;
SchroVideoFormatEnum i_vf;
} schro_format_guess[] = {
/* Important: Keep this list ordered in ascending picture height */
{1, 0, SCHRO_VIDEO_FORMAT_CUSTOM},
{120, 15, SCHRO_VIDEO_FORMAT_QSIF},
{144, 12, SCHRO_VIDEO_FORMAT_QCIF},
{240, 15, SCHRO_VIDEO_FORMAT_SIF},
{288, 12, SCHRO_VIDEO_FORMAT_CIF},
{480, 30, SCHRO_VIDEO_FORMAT_SD480I_60},
{480, 15, SCHRO_VIDEO_FORMAT_4SIF},
{576, 12, SCHRO_VIDEO_FORMAT_4CIF},
{576, 25, SCHRO_VIDEO_FORMAT_SD576I_50},
{720, 50, SCHRO_VIDEO_FORMAT_HD720P_50},
{720, 60, SCHRO_VIDEO_FORMAT_HD720P_60},
{1080, 24, SCHRO_VIDEO_FORMAT_DC2K_24},
{1080, 25, SCHRO_VIDEO_FORMAT_HD1080I_50},
{1080, 30, SCHRO_VIDEO_FORMAT_HD1080I_60},
{1080, 50, SCHRO_VIDEO_FORMAT_HD1080P_50},
{1080, 60, SCHRO_VIDEO_FORMAT_HD1080P_60},
{2160, 24, SCHRO_VIDEO_FORMAT_DC4K_24},
{0, 0, 0},
};
/*****************************************************************************
* ResetPTStlb: Purge all entries in @p_enc@'s PTS-tlb
*****************************************************************************/
static void ResetPTStlb( encoder_t *p_enc )
{
encoder_sys_t *p_sys = p_enc->p_sys;
for( int i = 0; i < SCHRO_PTS_TLB_SIZE; i++ )
{
p_sys->pts_tlb[i].b_empty = true;
}
}
/*****************************************************************************
* StorePicturePTS: Store the PTS value for a particular picture number
*****************************************************************************/
static void StorePicturePTS( encoder_t *p_enc, uint32_t u_pnum, mtime_t i_pts )
{
encoder_sys_t *p_sys = p_enc->p_sys;
for( int i = 0; i<SCHRO_PTS_TLB_SIZE; i++ )
{
if( p_sys->pts_tlb[i].b_empty )
{
p_sys->pts_tlb[i].u_pnum = u_pnum;
p_sys->pts_tlb[i].i_pts = i_pts;
p_sys->pts_tlb[i].b_empty = false;
return;
}
}
msg_Err( p_enc, "Could not store PTS %"PRId64" for frame %u", i_pts, u_pnum );
}
/*****************************************************************************
* GetPicturePTS: Retrieve the PTS value for a particular picture number
*****************************************************************************/
static mtime_t GetPicturePTS( encoder_t *p_enc, uint32_t u_pnum )
{
encoder_sys_t *p_sys = p_enc->p_sys;
for( int i = 0; i < SCHRO_PTS_TLB_SIZE; i++ )
{
if( !p_sys->pts_tlb[i].b_empty &&
p_sys->pts_tlb[i].u_pnum == u_pnum )
{
p_sys->pts_tlb[i].b_empty = true;
return p_sys->pts_tlb[i].i_pts;
}
}
msg_Err( p_enc, "Could not retrieve PTS for picture %u", u_pnum );
return 0;
}
static inline bool SchroSetEnum( const encoder_t *p_enc, int i_list_size, const char *list[],
const char *psz_name, const char *psz_name_text, const char *psz_value)
{
encoder_sys_t *p_sys = p_enc->p_sys;
if( list && psz_name_text && psz_name && psz_value ) {
for( int i = 0; i < i_list_size; ++i ) {
if( strcmp( list[i], psz_value ) )
continue;
schro_encoder_setting_set_double( p_sys->p_schro, psz_name, i );
return true;
}
msg_Err( p_enc, "Invalid %s: %s", psz_name_text, psz_value );
}
return false;
}
static bool SetEncChromaFormat( encoder_t *p_enc, uint32_t i_codec )
{
encoder_sys_t *p_sys = p_enc->p_sys;
switch( i_codec ) {
case VLC_CODEC_I420:
p_enc->fmt_in.i_codec = i_codec;
p_enc->fmt_in.video.i_bits_per_pixel = 12;
p_sys->p_format->chroma_format = SCHRO_CHROMA_420;
break;
case VLC_CODEC_I422:
p_enc->fmt_in.i_codec = i_codec;
p_enc->fmt_in.video.i_bits_per_pixel = 16;
p_sys->p_format->chroma_format = SCHRO_CHROMA_422;
break;
case VLC_CODEC_I444:
p_enc->fmt_in.i_codec = i_codec;
p_enc->fmt_in.video.i_bits_per_pixel = 24;
p_sys->p_format->chroma_format = SCHRO_CHROMA_444;
break;
default:
return false;
}
return true;
}
#define SCHRO_SET_FLOAT(psz_name, pschro_name) \
f_tmp = var_GetFloat( p_enc, ENC_CFG_PREFIX psz_name ); \
if( f_tmp >= 0.0 ) \
schro_encoder_setting_set_double( p_sys->p_schro, pschro_name, f_tmp );
#define SCHRO_SET_INTEGER(psz_name, pschro_name, ignore_val) \
i_tmp = var_GetInteger( p_enc, ENC_CFG_PREFIX psz_name ); \
if( i_tmp > ignore_val ) \
schro_encoder_setting_set_double( p_sys->p_schro, pschro_name, i_tmp );
#define SCHRO_SET_ENUM(list, psz_name, psz_name_text, pschro_name) \
psz_tmp = var_GetString( p_enc, ENC_CFG_PREFIX psz_name ); \
if( !psz_tmp ) \
goto error; \
else if ( *psz_tmp != '\0' ) { \
int i_list_size = ARRAY_SIZE(list); \
if( !SchroSetEnum( p_enc, i_list_size, list, pschro_name, psz_name_text, psz_tmp ) ) { \
free( psz_tmp ); \
goto error; \
} \
} \
free( psz_tmp );
/*****************************************************************************
* OpenEncoder: probe the encoder and return score
*****************************************************************************/
static int OpenEncoder( vlc_object_t *p_this )
{
encoder_t *p_enc = (encoder_t *)p_this;
encoder_sys_t *p_sys;
int i_tmp;
float f_tmp;
char *psz_tmp;
if( p_enc->fmt_out.i_codec != VLC_CODEC_DIRAC &&
!p_enc->b_force )
{
return VLC_EGENERIC;
}
if( !p_enc->fmt_in.video.i_frame_rate || !p_enc->fmt_in.video.i_frame_rate_base ||
!p_enc->fmt_in.video.i_visible_height || !p_enc->fmt_in.video.i_visible_width )
{
msg_Err( p_enc, "Framerate and picture dimensions must be non-zero" );
return VLC_EGENERIC;
}
/* Allocate the memory needed to store the decoder's structure */
if( ( p_sys = calloc( 1, sizeof( *p_sys ) ) ) == NULL )
return VLC_ENOMEM;
p_enc->p_sys = p_sys;
p_enc->pf_encode_video = Encode;
p_enc->fmt_out.i_codec = VLC_CODEC_DIRAC;
p_enc->fmt_out.i_cat = VIDEO_ES;
if( ( p_sys->p_dts_fifo = block_FifoNew() ) == NULL )
{
CloseEncoder( p_this );
return VLC_ENOMEM;
}
ResetPTStlb( p_enc );
/* guess the video format based upon number of lines and picture height */
int i = 0;
SchroVideoFormatEnum guessed_video_fmt = SCHRO_VIDEO_FORMAT_CUSTOM;
/* Pick the dirac_video_format in this order of preference:
* 1. an exact match in frame height and an approximate fps match
* 2. the previous preset with a smaller number of lines.
*/
do
{
if( schro_format_guess[i].i_height > p_enc->fmt_in.video.i_height )
{
guessed_video_fmt = schro_format_guess[i-1].i_vf;
break;
}
if( schro_format_guess[i].i_height != p_enc->fmt_in.video.i_height )
continue;
int src_fps = p_enc->fmt_in.video.i_frame_rate / p_enc->fmt_in.video.i_frame_rate_base;
int delta_fps = abs( schro_format_guess[i].i_approx_fps - src_fps );
if( delta_fps > 2 )
continue;
guessed_video_fmt = schro_format_guess[i].i_vf;
break;
} while( schro_format_guess[++i].i_height );
schro_init();
p_sys->p_schro = schro_encoder_new();
if( !p_sys->p_schro ) {
msg_Err( p_enc, "Failed to initialize libschroedinger encoder" );
return VLC_EGENERIC;
}
schro_encoder_set_packet_assembly( p_sys->p_schro, true );
if( !( p_sys->p_format = schro_encoder_get_video_format( p_sys->p_schro ) ) ) {
msg_Err( p_enc, "Failed to get Schroedigner video format" );
schro_encoder_free( p_sys->p_schro );
return VLC_EGENERIC;
}
/* initialise the video format parameters to the guessed format */
schro_video_format_set_std_video_format( p_sys->p_format, guessed_video_fmt );
/* constants set from the input video format */
p_sys->p_format->width = p_enc->fmt_in.video.i_visible_width;
p_sys->p_format->height = p_enc->fmt_in.video.i_visible_height;
p_sys->p_format->frame_rate_numerator = p_enc->fmt_in.video.i_frame_rate;
p_sys->p_format->frame_rate_denominator = p_enc->fmt_in.video.i_frame_rate_base;
unsigned u_asr_num, u_asr_den;
vlc_ureduce( &u_asr_num, &u_asr_den,
p_enc->fmt_in.video.i_sar_num,
p_enc->fmt_in.video.i_sar_den, 0 );
p_sys->p_format->aspect_ratio_numerator = u_asr_num;
p_sys->p_format->aspect_ratio_denominator = u_asr_den;
config_ChainParse( p_enc, ENC_CFG_PREFIX, ppsz_enc_options, p_enc->p_cfg );
SCHRO_SET_ENUM(enc_rate_control_list, ENC_RATE_CONTROL, ENC_RATE_CONTROL_TEXT, "rate_control")
SCHRO_SET_ENUM(enc_gop_structure_list, ENC_GOP_STRUCTURE, ENC_GOP_STRUCTURE_TEXT, "gop_structure")
psz_tmp = var_GetString( p_enc, ENC_CFG_PREFIX ENC_CHROMAFMT );
if( !psz_tmp )
goto error;
else {
uint32_t i_codec;
if( !strcmp( psz_tmp, "420" ) ) {
i_codec = VLC_CODEC_I420;
}
else if( !strcmp( psz_tmp, "422" ) ) {
i_codec = VLC_CODEC_I422;
}
else if( !strcmp( psz_tmp, "444" ) ) {
i_codec = VLC_CODEC_I444;
}
else {
msg_Err( p_enc, "Invalid chroma format: %s", psz_tmp );
free( psz_tmp );
goto error;
}
SetEncChromaFormat( p_enc, i_codec );
}
free( psz_tmp );
SCHRO_SET_FLOAT(ENC_QUALITY, "quality")
SCHRO_SET_FLOAT(ENC_NOISE_THRESHOLD, "noise_threshold")
/* use bitrate from sout-transcode-vb in kbps */
i_tmp = var_GetInteger( p_enc, ENC_CFG_PREFIX ENC_BITRATE );
if( i_tmp > -1 )
schro_encoder_setting_set_double( p_sys->p_schro, "bitrate", i_tmp * 1000 );
else
schro_encoder_setting_set_double( p_sys->p_schro, "bitrate", p_enc->fmt_out.i_bitrate );
p_enc->fmt_out.i_bitrate = schro_encoder_setting_get_double( p_sys->p_schro, "bitrate" );
i_tmp = var_GetInteger( p_enc, ENC_CFG_PREFIX ENC_MIN_BITRATE );
if( i_tmp > -1 )
schro_encoder_setting_set_double( p_sys->p_schro, "min_bitrate", i_tmp * 1000 );
i_tmp = var_GetInteger( p_enc, ENC_CFG_PREFIX ENC_MAX_BITRATE );
if( i_tmp > -1 )
schro_encoder_setting_set_double( p_sys->p_schro, "max_bitrate", i_tmp * 1000 );
SCHRO_SET_INTEGER(ENC_AU_DISTANCE, "au_distance", -1)
SCHRO_SET_ENUM(enc_filtering_list, ENC_PREFILTER, ENC_PREFILTER_TEXT, "filtering")
SCHRO_SET_FLOAT(ENC_PREFILTER_STRENGTH, "filter_value")
psz_tmp = var_GetString( p_enc, ENC_CFG_PREFIX ENC_CODINGMODE );
if( !psz_tmp )
goto error;
else if( !strcmp( psz_tmp, "auto" ) ) {
p_sys->b_auto_field_coding = true;
}
else if( !strcmp( psz_tmp, "progressive" ) ) {
p_sys->b_auto_field_coding = false;
schro_encoder_setting_set_double( p_sys->p_schro, "interlaced_coding", false);
}
else if( !strcmp( psz_tmp, "field" ) ) {
p_sys->b_auto_field_coding = false;
schro_encoder_setting_set_double( p_sys->p_schro, "interlaced_coding", true);
}
else {
msg_Err( p_enc, "Invalid codingmode: %s", psz_tmp );
free( psz_tmp );
goto error;
}
free( psz_tmp );
SCHRO_SET_ENUM(enc_block_size_list, ENC_MCBLK_SIZE, ENC_MCBLK_SIZE_TEXT, "motion_block_size")
SCHRO_SET_ENUM(enc_block_overlap_list, ENC_MCBLK_OVERLAP, ENC_MCBLK_OVERLAP_TEXT, "motion_block_overlap")
psz_tmp = var_GetString( p_enc, ENC_CFG_PREFIX ENC_MVPREC );
if( !psz_tmp )
goto error;
else if( *psz_tmp != '\0') {
if( !strcmp( psz_tmp, "1" ) ) {
schro_encoder_setting_set_double( p_sys->p_schro, "mv_precision", 0 );
}
else if( !strcmp( psz_tmp, "1/2" ) ) {
schro_encoder_setting_set_double( p_sys->p_schro, "mv_precision", 1 );
}
else if( !strcmp( psz_tmp, "1/4" ) ) {
schro_encoder_setting_set_double( p_sys->p_schro, "mv_precision", 2 );
}
else if( !strcmp( psz_tmp, "1/8" ) ) {
schro_encoder_setting_set_double( p_sys->p_schro, "mv_precision", 3 );
}
else {
msg_Err( p_enc, "Invalid mv_precision: %s", psz_tmp );
free( psz_tmp );
goto error;
}
}
free( psz_tmp );
SCHRO_SET_INTEGER(ENC_ME_COMBINED, "enable_chroma_me", -1)
SCHRO_SET_ENUM(enc_wavelet_list, ENC_DWTINTRA, ENC_DWTINTRA_TEXT, "intra_wavelet")
SCHRO_SET_ENUM(enc_wavelet_list, ENC_DWTINTER, ENC_DWTINTER_TEXT, "inter_wavelet")
SCHRO_SET_INTEGER(ENC_DWTDEPTH, "transform_depth", -1)
SCHRO_SET_INTEGER(ENC_MULTIQUANT, "enable_multiquant", -1)
SCHRO_SET_INTEGER(ENC_NOAC, "enable_noarith", -1)
SCHRO_SET_ENUM(enc_perceptual_weighting_list, ENC_PWT, ENC_PWT_TEXT, "perceptual_weighting")
SCHRO_SET_FLOAT(ENC_PDIST, "perceptual_distance")
SCHRO_SET_INTEGER(ENC_HSLICES, "horiz_slices", -1)
SCHRO_SET_INTEGER(ENC_VSLICES, "vert_slices", -1)
SCHRO_SET_ENUM(enc_codeblock_size_list, ENC_SCBLK_SIZE, ENC_SCBLK_SIZE_TEXT, "codeblock_size")
SCHRO_SET_INTEGER(ENC_ME_HIERARCHICAL, "enable_hierarchical_estimation", -1)
SCHRO_SET_INTEGER(ENC_ME_DOWNSAMPLE_LEVELS, "downsample_levels", 1)
SCHRO_SET_INTEGER(ENC_ME_GLOBAL_MOTION, "enable_global_motion", -1)
SCHRO_SET_INTEGER(ENC_ME_PHASECORR, "enable_phasecorr_estimation", -1)
SCHRO_SET_INTEGER(ENC_SCD, "enable_scene_change_detection", -1)
SCHRO_SET_ENUM(enc_profile_list, ENC_FORCE_PROFILE, ENC_FORCE_PROFILE_TEXT, "force_profile")
p_sys->started = 0;
return VLC_SUCCESS;
error:
CloseEncoder( p_this );
return VLC_EGENERIC;
}
struct enc_picture_free_t
{
picture_t *p_pic;
encoder_t *p_enc;
};
/*****************************************************************************
* EncSchroFrameFree: schro_frame callback to release the associated picture_t
* When schro_encoder_reset() is called there will be pictures in the
* encoding pipeline that need to be released rather than displayed.
*****************************************************************************/
static void EncSchroFrameFree( SchroFrame *frame, void *priv )
{
struct enc_picture_free_t *p_free = priv;
if( !p_free )
return;
picture_Release( p_free->p_pic );
free( p_free );
(void)frame;
}
/*****************************************************************************
* CreateSchroFrameFromPic: wrap a picture_t in a SchroFrame
*****************************************************************************/
static SchroFrame *CreateSchroFrameFromInputPic( encoder_t *p_enc, picture_t *p_pic )
{
encoder_sys_t *p_sys = p_enc->p_sys;
SchroFrame *p_schroframe = schro_frame_new();
struct enc_picture_free_t *p_free;
if( !p_schroframe )
return NULL;
if( !p_pic )
return NULL;
p_schroframe->format = SCHRO_FRAME_FORMAT_U8_420;
if( p_sys->p_format->chroma_format == SCHRO_CHROMA_422 )
{
p_schroframe->format = SCHRO_FRAME_FORMAT_U8_422;
}
else if( p_sys->p_format->chroma_format == SCHRO_CHROMA_444 )
{
p_schroframe->format = SCHRO_FRAME_FORMAT_U8_444;
}
p_schroframe->width = p_sys->p_format->width;
p_schroframe->height = p_sys->p_format->height;
p_free = malloc( sizeof( *p_free ) );
if( unlikely( p_free == NULL ) ) {
schro_frame_unref( p_schroframe );
return NULL;
}
p_free->p_pic = p_pic;
p_free->p_enc = p_enc;
schro_frame_set_free_callback( p_schroframe, EncSchroFrameFree, p_free );
for( int i=0; i<3; i++ )
{
p_schroframe->components[i].width = p_pic->p[i].i_visible_pitch;
p_schroframe->components[i].stride = p_pic->p[i].i_pitch;
p_schroframe->components[i].height = p_pic->p[i].i_visible_lines;
p_schroframe->components[i].length =
p_pic->p[i].i_pitch * p_pic->p[i].i_lines;
p_schroframe->components[i].data = p_pic->p[i].p_pixels;
if( i!=0 )
{
p_schroframe->components[i].v_shift =
SCHRO_FRAME_FORMAT_V_SHIFT( p_schroframe->format );
p_schroframe->components[i].h_shift =
SCHRO_FRAME_FORMAT_H_SHIFT( p_schroframe->format );
}
}
return p_schroframe;
}
/* Attempt to find dirac picture number in an encapsulation unit */
static int ReadDiracPictureNumber( uint32_t *p_picnum, block_t *p_block )
{
uint32_t u_pos = 4;
/* protect against falling off the edge */
while( u_pos + 13 < p_block->i_buffer )
{
/* find the picture startcode */
if( p_block->p_buffer[u_pos] & 0x08 )
{
*p_picnum = GetDWBE( p_block->p_buffer + u_pos + 9 );
return 1;
}
/* skip to the next dirac data unit */
uint32_t u_npo = GetDWBE( p_block->p_buffer + u_pos + 1 );
assert( u_npo <= UINT32_MAX - u_pos );
if( u_npo == 0 )
u_npo = 13;
u_pos += u_npo;
}
return 0;
}
static block_t *Encode( encoder_t *p_enc, picture_t *p_pic )
{
encoder_sys_t *p_sys = p_enc->p_sys;
block_t *p_block, *p_output_chain = NULL;
SchroFrame *p_frame;
bool b_go = true;
if( !p_pic ) {
if( !p_sys->started || p_sys->b_eos_pulled )
return NULL;
if( !p_sys->b_eos_signalled ) {
p_sys->b_eos_signalled = 1;
schro_encoder_end_of_stream( p_sys->p_schro );
}
} else {
/* we only know if the sequence is interlaced when the first
* picture arrives, so final setup is done here */
/* XXX todo, detect change of interlace */
p_sys->p_format->interlaced = !p_pic->b_progressive;
p_sys->p_format->top_field_first = p_pic->b_top_field_first;
if( p_sys->b_auto_field_coding )
schro_encoder_setting_set_double( p_sys->p_schro, "interlaced_coding", !p_pic->b_progressive );
}
if( !p_sys->started ) {
date_t date;
if( p_pic->format.i_chroma != p_enc->fmt_in.i_codec ) {
char chroma_in[5], chroma_out[5];
vlc_fourcc_to_char( p_pic->format.i_chroma, chroma_in );
chroma_in[4] = '\0';
chroma_out[4] = '\0';
vlc_fourcc_to_char( p_enc->fmt_in.i_codec, chroma_out );
msg_Warn( p_enc, "Resetting chroma from %s to %s", chroma_out, chroma_in );
if( !SetEncChromaFormat( p_enc, p_pic->format.i_chroma ) ) {
msg_Err( p_enc, "Could not reset chroma format to %s", chroma_in );
return NULL;
}
}
date_Init( &date, p_enc->fmt_in.video.i_frame_rate, p_enc->fmt_in.video.i_frame_rate_base );
/* FIXME - Unlike dirac-research codec Schro doesn't have a function that returns the delay in pics yet.
* Use a default of 1
*/
date_Increment( &date, 1 );
p_sys->i_pts_offset = date_Get( &date );
if( schro_encoder_setting_get_double( p_sys->p_schro, "interlaced_coding" ) > 0.0 ) {
date_Set( &date, 0 );
date_Increment( &date, 1);
p_sys->i_field_time = date_Get( &date ) / 2;
}
schro_video_format_set_std_signal_range( p_sys->p_format, SCHRO_SIGNAL_RANGE_8BIT_VIDEO );
schro_encoder_set_video_format( p_sys->p_schro, p_sys->p_format );
schro_encoder_start( p_sys->p_schro );
p_sys->started = 1;
}
if( !p_sys->b_eos_signalled ) {
/* create a schro frame from the input pic and load */
/* Increase ref count by 1 so that the picture is not freed until
Schro finishes with it */
picture_Hold( p_pic );
p_frame = CreateSchroFrameFromInputPic( p_enc, p_pic );
if( !p_frame )
return NULL;
schro_encoder_push_frame( p_sys->p_schro, p_frame );
/* store pts in a lookaside buffer, so that the same pts may
* be used for the picture in coded order */
StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date );
p_sys->i_input_picnum++;
/* store dts in a queue, so that they appear in order in
* coded order */
p_block = block_Alloc( 1 );
if( !p_block )
return NULL;
p_block->i_dts = p_pic->date - p_sys->i_pts_offset;
block_FifoPut( p_sys->p_dts_fifo, p_block );
p_block = NULL;
/* for field coding mode, insert an extra value into both the
* pts lookaside buffer and dts queue, offset to correspond
* to a one field delay. */
if( schro_encoder_setting_get_double( p_sys->p_schro, "interlaced_coding" ) > 0.0 ) {
StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date + p_sys->i_field_time );
p_sys->i_input_picnum++;
p_block = block_Alloc( 1 );
if( !p_block )
return NULL;
p_block->i_dts = p_pic->date - p_sys->i_pts_offset + p_sys->i_field_time;
block_FifoPut( p_sys->p_dts_fifo, p_block );
p_block = NULL;
}
}
do
{
SchroStateEnum state;
state = schro_encoder_wait( p_sys->p_schro );
switch( state )
{
case SCHRO_STATE_NEED_FRAME:
b_go = false;
break;
case SCHRO_STATE_AGAIN:
break;
case SCHRO_STATE_END_OF_STREAM:
p_sys->b_eos_pulled = 1;
b_go = false;
break;
case SCHRO_STATE_HAVE_BUFFER:
{
SchroBuffer *p_enc_buf;
uint32_t u_pic_num;
int i_presentation_frame;
p_enc_buf = schro_encoder_pull( p_sys->p_schro, &i_presentation_frame );
p_block = block_Alloc( p_enc_buf->length );
if( !p_block )
return NULL;
memcpy( p_block->p_buffer, p_enc_buf->data, p_enc_buf->length );
schro_buffer_unref( p_enc_buf );
/* Presence of a Sequence header indicates a seek point */
if( 0 == p_block->p_buffer[4] )
{
p_block->i_flags |= BLOCK_FLAG_TYPE_I;
if( !p_enc->fmt_out.p_extra ) {
const uint8_t eos[] = { 'B','B','C','D',0x10,0,0,0,13,0,0,0,0 };
uint32_t len = GetDWBE( p_block->p_buffer + 5 );
/* if it hasn't been done so far, stash a copy of the
* sequence header for muxers such as ogg */
/* The OggDirac spec advises that a Dirac EOS DataUnit
* is appended to the sequence header to allow guard
* against poor streaming servers */
/* XXX, should this be done using the packetizer ? */
p_enc->fmt_out.p_extra = malloc( len + sizeof( eos ) );
if( !p_enc->fmt_out.p_extra )
return NULL;
memcpy( p_enc->fmt_out.p_extra, p_block->p_buffer, len );
memcpy( (uint8_t*)p_enc->fmt_out.p_extra + len, eos, sizeof( eos ) );
SetDWBE( (uint8_t*)p_enc->fmt_out.p_extra + len + sizeof(eos) - 4, len );
p_enc->fmt_out.i_extra = len + sizeof( eos );
}
}
if( ReadDiracPictureNumber( &u_pic_num, p_block ) ) {
block_t *p_dts_block = block_FifoGet( p_sys->p_dts_fifo );
p_block->i_dts = p_dts_block->i_dts;
p_block->i_pts = GetPicturePTS( p_enc, u_pic_num );
block_Release( p_dts_block );
block_ChainAppend( &p_output_chain, p_block );
} else {
/* End of sequence */
block_ChainAppend( &p_output_chain, p_block );
}
break;
}
default:
break;
}
} while( b_go );
return p_output_chain;
}
/*****************************************************************************
* CloseEncoder: Schro encoder destruction
*****************************************************************************/
static void CloseEncoder( vlc_object_t *p_this )
{
encoder_t *p_enc = (encoder_t *)p_this;
encoder_sys_t *p_sys = p_enc->p_sys;
/* Free the encoder resources */
if( p_sys->p_schro )
schro_encoder_free( p_sys->p_schro );
free( p_sys->p_format );
if( p_sys->p_dts_fifo )
block_FifoRelease( p_sys->p_dts_fifo );
block_ChainRelease( p_sys->p_chain );
free( p_sys );
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_2396_0 |
crossvul-cpp_data_bad_3862_1 | /*
* Copyright (c) 2016 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <tc_util.h>
#include <mqtt_internal.h>
#include <sys/util.h> /* for ARRAY_SIZE */
#include <ztest.h>
#define CLIENTID MQTT_UTF8_LITERAL("zephyr")
#define TOPIC MQTT_UTF8_LITERAL("sensors")
#define WILL_TOPIC MQTT_UTF8_LITERAL("quitting")
#define WILL_MSG MQTT_UTF8_LITERAL("bye")
#define USERNAME MQTT_UTF8_LITERAL("zephyr1")
#define PASSWORD MQTT_UTF8_LITERAL("password")
#define BUFFER_SIZE 128
static ZTEST_DMEM u8_t rx_buffer[BUFFER_SIZE];
static ZTEST_DMEM u8_t tx_buffer[BUFFER_SIZE];
static ZTEST_DMEM struct mqtt_client client;
static ZTEST_DMEM struct mqtt_topic topic_qos_0 = {
.qos = 0,
.topic = TOPIC,
};
static ZTEST_DMEM struct mqtt_topic topic_qos_1 = {
.qos = 1,
.topic = TOPIC,
};
static ZTEST_DMEM struct mqtt_topic topic_qos_2 = {
.qos = 2,
.topic = TOPIC,
};
static ZTEST_DMEM struct mqtt_topic will_topic_qos_0 = {
.qos = 0,
.topic = WILL_TOPIC,
};
static ZTEST_DMEM struct mqtt_topic will_topic_qos_1 = {
.qos = 1,
.topic = WILL_TOPIC,
};
static ZTEST_DMEM struct mqtt_utf8 will_msg = WILL_MSG;
static ZTEST_DMEM struct mqtt_utf8 username = USERNAME;
static ZTEST_DMEM struct mqtt_utf8 password = PASSWORD;
/**
* @brief MQTT test structure
*/
struct mqtt_test {
/* test name, for example: "test connect 1" */
const char *test_name;
/* cast to something like:
* struct mqtt_publish_param *msg_publish =
* (struct mqtt_publish_param *)ctx
*/
void *ctx;
/* pointer to the eval routine, for example:
* eval_fcn = eval_msg_connect
*/
int (*eval_fcn)(struct mqtt_test *);
/* expected result */
u8_t *expected;
/* length of 'expected' */
u16_t expected_len;
};
/**
* @brief eval_msg_connect Evaluate the given mqtt_test against the
* connect packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_connect(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_publish Evaluate the given mqtt_test against the
* publish packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_publish(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_subscribe Evaluate the given mqtt_test against the
* subscribe packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_subscribe(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_suback Evaluate the given mqtt_test against the
* suback packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_suback(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_pingreq Evaluate the given mqtt_test against the
* pingreq packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pingreq(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_puback Evaluate the given mqtt_test against the
* puback routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_puback(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_puback Evaluate the given mqtt_test against the
* pubcomp routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pubcomp(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_pubrec Evaluate the given mqtt_test against the
* pubrec routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pubrec(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_pubrel Evaluate the given mqtt_test against the
* pubrel routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pubrel(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_unsuback Evaluate the given mqtt_test against the
* unsuback routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_unsuback(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_disconnect Evaluate the given mqtt_test against the
* disconnect routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_disconnect(struct mqtt_test *mqtt_test);
/**
* @brief eval_max_pkt_len Evaluate header with maximum allowed packet
* length.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_max_pkt_len(struct mqtt_test *mqtt_test);
/**
* @brief eval_corrupted_pkt_len Evaluate header exceeding maximum
* allowed packet length.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_corrupted_pkt_len(struct mqtt_test *mqtt_test);
/**
* @brief eval_buffers Evaluate if two given buffers are equal
* @param [in] buf Input buffer 1, mostly used as the 'computed'
* buffer
* @param [in] expected Expected buffer
* @param [in] len 'expected' len
* @return TC_PASS on success
* @return TC_FAIL on error and prints both buffers
*/
static int eval_buffers(const struct buf_ctx *buf,
const u8_t *expected, u16_t len);
/**
* @brief print_array Prints the array 'a' of 'size' elements
* @param a The array
* @param size Array's size
*/
static void print_array(const u8_t *a, u16_t size);
/*
* MQTT CONNECT msg:
* Clean session: 1 Client id: [6] 'zephyr' Will flag: 0
* Will QoS: 0 Will retain: 0 Will topic: [0]
* Will msg: [0] Keep alive: 60 User name: [0]
* Password: [0]
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors
*/
static ZTEST_DMEM
u8_t connect1[] = {0x10, 0x12, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x02, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72};
static ZTEST_DMEM struct mqtt_client client_connect1 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 0, .will_topic = NULL,
.will_message = NULL, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Clean session: 1 Client id: [6] 'zephyr' Will flag: 1
* Will QoS: 0 Will retain: 0 Will topic: [8] quitting
* Will msg: [3] bye Keep alive: 0
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 0 --will-payload bye
*/
static ZTEST_DMEM
u8_t connect2[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x06, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect2 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 0, .will_topic = &will_topic_qos_0,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect3, but set Will retain: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 0 --will-payload bye --will-retain
*/
static ZTEST_DMEM
u8_t connect3[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x26, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect3 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 1, .will_topic = &will_topic_qos_0,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect3, but set Will QoS: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 1 --will-payload bye
*/
static ZTEST_DMEM
u8_t connect4[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x0e, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect4 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 0, .will_topic = &will_topic_qos_1,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect5, but set Will retain: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 1 --will-payload bye --will-retain
*/
static ZTEST_DMEM
u8_t connect5[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x2e, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect5 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 1, .will_topic = &will_topic_qos_1,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect6, but set username: zephyr1 and password: password
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 1 --will-payload bye --will-retain -u zephyr1 -P password
*/
static ZTEST_DMEM
u8_t connect6[] = {0x10, 0x34, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0xee, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65, 0x00, 0x07, 0x7a, 0x65, 0x70,
0x68, 0x79, 0x72, 0x31, 0x00, 0x08, 0x70, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64};
static ZTEST_DMEM struct mqtt_client client_connect6 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 1, .will_topic = &will_topic_qos_1,
.will_message = &will_msg, .user_name = &username,
.password = &password
};
static ZTEST_DMEM
u8_t disconnect1[] = {0xe0, 0x00};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 0, Retain: 0, topic: sensors, message: OK
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 0 -m "OK"
*/
static ZTEST_DMEM
u8_t publish1[] = {0x30, 0x0b, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish1 = {
.dup_flag = 0, .retain_flag = 0, .message_id = 0,
.message.topic.qos = 0,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 0, Retain: 1, topic: sensors, message: OK
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 0 -m "OK" -r
*/
static ZTEST_DMEM
u8_t publish2[] = {0x31, 0x0b, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish2 = {
.dup_flag = 0, .retain_flag = 1, .message_id = 0,
.message.topic.qos = 0,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 1, Retain: 1, topic: sensors, message: OK, pkt_id: 1
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 1 -m "OK" -r
*/
static ZTEST_DMEM
u8_t publish3[] = {0x33, 0x0d, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x00, 0x01, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish3 = {
.dup_flag = 0, .retain_flag = 1, .message_id = 1,
.message.topic.qos = 1,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 2, Retain: 0, topic: sensors, message: OK, pkt_id: 1
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 2 -m "OK"
*/
static ZTEST_DMEM
u8_t publish4[] = {0x34, 0x0d, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x00, 0x01, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish4 = {
.dup_flag = 0, .retain_flag = 0, .message_id = 1,
.message.topic.qos = 2,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT SUBSCRIBE msg:
* pkt_id: 1, topic: sensors, qos: 0
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 0
*/
static ZTEST_DMEM
u8_t subscribe1[] = {0x82, 0x0c, 0x00, 0x01, 0x00, 0x07, 0x73, 0x65,
0x6e, 0x73, 0x6f, 0x72, 0x73, 0x00};
static ZTEST_DMEM struct mqtt_subscription_list msg_subscribe1 = {
.message_id = 1, .list_count = 1, .list = &topic_qos_0
};
/*
* MQTT SUBSCRIBE msg:
* pkt_id: 1, topic: sensors, qos: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 1
*/
static ZTEST_DMEM
u8_t subscribe2[] = {0x82, 0x0c, 0x00, 0x01, 0x00, 0x07, 0x73, 0x65,
0x6e, 0x73, 0x6f, 0x72, 0x73, 0x01};
static ZTEST_DMEM struct mqtt_subscription_list msg_subscribe2 = {
.message_id = 1, .list_count = 1, .list = &topic_qos_1
};
/*
* MQTT SUBSCRIBE msg:
* pkt_id: 1, topic: sensors, qos: 2
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 2
*/
static ZTEST_DMEM
u8_t subscribe3[] = {0x82, 0x0c, 0x00, 0x01, 0x00, 0x07, 0x73, 0x65,
0x6e, 0x73, 0x6f, 0x72, 0x73, 0x02};
static ZTEST_DMEM struct mqtt_subscription_list msg_subscribe3 = {
.message_id = 1, .list_count = 1, .list = &topic_qos_2
};
/*
* MQTT SUBACK msg
* pkt_id: 1, qos: 0
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 0
*/
static ZTEST_DMEM
u8_t suback1[] = {0x90, 0x03, 0x00, 0x01, 0x00};
static ZTEST_DMEM u8_t data_suback1[] = { MQTT_SUBACK_SUCCESS_QoS_0 };
static ZTEST_DMEM struct mqtt_suback_param msg_suback1 = {
.message_id = 1, .return_codes.len = 1,
.return_codes.data = data_suback1
};
/*
* MQTT SUBACK message
* pkt_id: 1, qos: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 1
*/
static ZTEST_DMEM
u8_t suback2[] = {0x90, 0x03, 0x00, 0x01, 0x01};
static ZTEST_DMEM u8_t data_suback2[] = { MQTT_SUBACK_SUCCESS_QoS_1 };
static ZTEST_DMEM struct mqtt_suback_param msg_suback2 = {
.message_id = 1, .return_codes.len = 1,
.return_codes.data = data_suback2
};
/*
* MQTT SUBACK message
* pkt_id: 1, qos: 2
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 2
*/
static ZTEST_DMEM
u8_t suback3[] = {0x90, 0x03, 0x00, 0x01, 0x02};
static ZTEST_DMEM u8_t data_suback3[] = { MQTT_SUBACK_SUCCESS_QoS_2 };
static ZTEST_DMEM struct mqtt_suback_param msg_suback3 = {
.message_id = 1, .return_codes.len = 1,
.return_codes.data = data_suback3
};
static ZTEST_DMEM
u8_t pingreq1[] = {0xc0, 0x00};
static ZTEST_DMEM
u8_t puback1[] = {0x40, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_puback_param msg_puback1 = {.message_id = 1};
static ZTEST_DMEM
u8_t pubrec1[] = {0x50, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_pubrec_param msg_pubrec1 = {.message_id = 1};
static ZTEST_DMEM
u8_t pubrel1[] = {0x62, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_pubrel_param msg_pubrel1 = {.message_id = 1};
static ZTEST_DMEM
u8_t pubcomp1[] = {0x70, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_pubcomp_param msg_pubcomp1 = {.message_id = 1};
static ZTEST_DMEM
u8_t unsuback1[] = {0xb0, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_unsuback_param msg_unsuback1 = {.message_id = 1};
static ZTEST_DMEM
u8_t max_pkt_len[] = {0x30, 0xff, 0xff, 0xff, 0x7f};
static ZTEST_DMEM struct buf_ctx max_pkt_len_buf = {
.cur = max_pkt_len, .end = max_pkt_len + sizeof(max_pkt_len)
};
static ZTEST_DMEM
u8_t corrupted_pkt_len[] = {0x30, 0xff, 0xff, 0xff, 0xff, 0x01};
static ZTEST_DMEM struct buf_ctx corrupted_pkt_len_buf = {
.cur = corrupted_pkt_len,
.end = corrupted_pkt_len + sizeof(corrupted_pkt_len)
};
static ZTEST_DMEM
struct mqtt_test mqtt_tests[] = {
{.test_name = "CONNECT, new session, zeros",
.ctx = &client_connect1, .eval_fcn = eval_msg_connect,
.expected = connect1, .expected_len = sizeof(connect1)},
{.test_name = "CONNECT, new session, will",
.ctx = &client_connect2, .eval_fcn = eval_msg_connect,
.expected = connect2, .expected_len = sizeof(connect2)},
{.test_name = "CONNECT, new session, will retain",
.ctx = &client_connect3, .eval_fcn = eval_msg_connect,
.expected = connect3, .expected_len = sizeof(connect3)},
{.test_name = "CONNECT, new session, will qos = 1",
.ctx = &client_connect4, .eval_fcn = eval_msg_connect,
.expected = connect4, .expected_len = sizeof(connect4)},
{.test_name = "CONNECT, new session, will qos = 1, will retain",
.ctx = &client_connect5, .eval_fcn = eval_msg_connect,
.expected = connect5, .expected_len = sizeof(connect5)},
{.test_name = "CONNECT, new session, username and password",
.ctx = &client_connect6, .eval_fcn = eval_msg_connect,
.expected = connect6, .expected_len = sizeof(connect6)},
{.test_name = "DISCONNECT",
.ctx = NULL, .eval_fcn = eval_msg_disconnect,
.expected = disconnect1, .expected_len = sizeof(disconnect1)},
{.test_name = "PUBLISH, qos = 0",
.ctx = &msg_publish1, .eval_fcn = eval_msg_publish,
.expected = publish1, .expected_len = sizeof(publish1)},
{.test_name = "PUBLISH, retain = 1",
.ctx = &msg_publish2, .eval_fcn = eval_msg_publish,
.expected = publish2, .expected_len = sizeof(publish2)},
{.test_name = "PUBLISH, retain = 1, qos = 1",
.ctx = &msg_publish3, .eval_fcn = eval_msg_publish,
.expected = publish3, .expected_len = sizeof(publish3)},
{.test_name = "PUBLISH, qos = 2",
.ctx = &msg_publish4, .eval_fcn = eval_msg_publish,
.expected = publish4, .expected_len = sizeof(publish4)},
{.test_name = "SUBSCRIBE, one topic, qos = 0",
.ctx = &msg_subscribe1, .eval_fcn = eval_msg_subscribe,
.expected = subscribe1, .expected_len = sizeof(subscribe1)},
{.test_name = "SUBSCRIBE, one topic, qos = 1",
.ctx = &msg_subscribe2, .eval_fcn = eval_msg_subscribe,
.expected = subscribe2, .expected_len = sizeof(subscribe2)},
{.test_name = "SUBSCRIBE, one topic, qos = 2",
.ctx = &msg_subscribe3, .eval_fcn = eval_msg_subscribe,
.expected = subscribe3, .expected_len = sizeof(subscribe3)},
{.test_name = "SUBACK, one topic, qos = 0",
.ctx = &msg_suback1, .eval_fcn = eval_msg_suback,
.expected = suback1, .expected_len = sizeof(suback1)},
{.test_name = "SUBACK, one topic, qos = 1",
.ctx = &msg_suback2, .eval_fcn = eval_msg_suback,
.expected = suback2, .expected_len = sizeof(suback2)},
{.test_name = "SUBACK, one topic, qos = 2",
.ctx = &msg_suback3, .eval_fcn = eval_msg_suback,
.expected = suback3, .expected_len = sizeof(suback3)},
{.test_name = "PINGREQ",
.ctx = NULL, .eval_fcn = eval_msg_pingreq,
.expected = pingreq1, .expected_len = sizeof(pingreq1)},
{.test_name = "PUBACK",
.ctx = &msg_puback1, .eval_fcn = eval_msg_puback,
.expected = puback1, .expected_len = sizeof(puback1)},
{.test_name = "PUBREC",
.ctx = &msg_pubrec1, .eval_fcn = eval_msg_pubrec,
.expected = pubrec1, .expected_len = sizeof(pubrec1)},
{.test_name = "PUBREL",
.ctx = &msg_pubrel1, .eval_fcn = eval_msg_pubrel,
.expected = pubrel1, .expected_len = sizeof(pubrel1)},
{.test_name = "PUBCOMP",
.ctx = &msg_pubcomp1, .eval_fcn = eval_msg_pubcomp,
.expected = pubcomp1, .expected_len = sizeof(pubcomp1)},
{.test_name = "UNSUBACK",
.ctx = &msg_unsuback1, .eval_fcn = eval_msg_unsuback,
.expected = unsuback1, .expected_len = sizeof(unsuback1)},
{.test_name = "Maximum packet length",
.ctx = &max_pkt_len_buf, .eval_fcn = eval_max_pkt_len},
{.test_name = "Corrupted packet length",
.ctx = &corrupted_pkt_len_buf, .eval_fcn = eval_corrupted_pkt_len},
/* last test case, do not remove it */
{.test_name = NULL}
};
static void print_array(const u8_t *a, u16_t size)
{
u16_t i;
TC_PRINT("\n");
for (i = 0U; i < size; i++) {
TC_PRINT("%x ", a[i]);
if ((i+1) % 8 == 0U) {
TC_PRINT("\n");
}
}
TC_PRINT("\n");
}
static
int eval_buffers(const struct buf_ctx *buf, const u8_t *expected, u16_t len)
{
if (buf->end - buf->cur != len) {
goto exit_eval;
}
if (memcmp(expected, buf->cur, buf->end - buf->cur) != 0) {
goto exit_eval;
}
return TC_PASS;
exit_eval:
TC_PRINT("FAIL\n");
TC_PRINT("Computed:");
print_array(buf->cur, buf->end - buf->cur);
TC_PRINT("Expected:");
print_array(expected, len);
return TC_FAIL;
}
static int eval_msg_connect(struct mqtt_test *mqtt_test)
{
struct mqtt_client *test_client;
int rc;
struct buf_ctx buf;
test_client = (struct mqtt_client *)mqtt_test->ctx;
client.clean_session = test_client->clean_session;
client.client_id = test_client->client_id;
client.will_topic = test_client->will_topic;
client.will_retain = test_client->will_retain;
client.will_message = test_client->will_message;
client.user_name = test_client->user_name;
client.password = test_client->password;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = connect_request_encode(&client, &buf);
/**TESTPOINTS: Check connect_request_encode functions*/
zassert_false(rc, "connect_request_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
return TC_PASS;
}
static int eval_msg_disconnect(struct mqtt_test *mqtt_test)
{
int rc;
struct buf_ctx buf;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = disconnect_encode(&buf);
/**TESTPOINTS: Check disconnect_encode functions*/
zassert_false(rc, "disconnect_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
return TC_PASS;
}
static int eval_msg_publish(struct mqtt_test *mqtt_test)
{
struct mqtt_publish_param *param =
(struct mqtt_publish_param *)mqtt_test->ctx;
struct mqtt_publish_param dec_param;
int rc;
u8_t type_and_flags;
u32_t length;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_encode(param, &buf);
/* Payload is not copied, copy it manually just after the header.*/
memcpy(buf.end, param->message.payload.data,
param->message.payload.len);
buf.end += param->message.payload.len;
/**TESTPOINT: Check publish_encode function*/
zassert_false(rc, "publish_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_decode(type_and_flags, length, &buf, &dec_param);
/**TESTPOINT: Check publish_decode function*/
zassert_false(rc, "publish_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"message_id error");
zassert_equal(dec_param.dup_flag, param->dup_flag,
"dup flag error");
zassert_equal(dec_param.retain_flag, param->retain_flag,
"retain flag error");
zassert_equal(dec_param.message.topic.qos, param->message.topic.qos,
"topic qos error");
zassert_equal(dec_param.message.topic.topic.size,
param->message.topic.topic.size,
"topic len error");
if (memcmp(dec_param.message.topic.topic.utf8,
param->message.topic.topic.utf8,
dec_param.message.topic.topic.size) != 0) {
zassert_unreachable("topic content error");
}
zassert_equal(dec_param.message.payload.len,
param->message.payload.len,
"payload len error");
return TC_PASS;
}
static int eval_msg_subscribe(struct mqtt_test *mqtt_test)
{
struct mqtt_subscription_list *param =
(struct mqtt_subscription_list *)mqtt_test->ctx;
int rc;
struct buf_ctx buf;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = subscribe_encode(param, &buf);
/**TESTPOINT: Check subscribe_encode function*/
zassert_false(rc, "subscribe_encode failed");
return eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
}
static int eval_msg_suback(struct mqtt_test *mqtt_test)
{
struct mqtt_suback_param *param =
(struct mqtt_suback_param *)mqtt_test->ctx;
struct mqtt_suback_param dec_param;
int rc;
u8_t type_and_flags;
u32_t length;
struct buf_ctx buf;
buf.cur = mqtt_test->expected;
buf.end = mqtt_test->expected + mqtt_test->expected_len;
memset(&dec_param, 0, sizeof(dec_param));
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = subscribe_ack_decode(&buf, &dec_param);
/**TESTPOINT: Check subscribe_ack_decode function*/
zassert_false(rc, "subscribe_ack_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
zassert_equal(dec_param.return_codes.len,
param->return_codes.len,
"topic count error");
if (memcmp(dec_param.return_codes.data, param->return_codes.data,
dec_param.return_codes.len) != 0) {
zassert_unreachable("subscribe result error");
}
return TC_PASS;
}
static int eval_msg_pingreq(struct mqtt_test *mqtt_test)
{
int rc;
struct buf_ctx buf;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = ping_request_encode(&buf);
/**TESTPOINTS: Check eval_msg_pingreq functions*/
zassert_false(rc, "ping_request_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
return TC_PASS;
}
static int eval_msg_puback(struct mqtt_test *mqtt_test)
{
struct mqtt_puback_param *param =
(struct mqtt_puback_param *)mqtt_test->ctx;
struct mqtt_puback_param dec_param;
int rc;
u8_t type_and_flags;
u32_t length;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_ack_encode(param, &buf);
/**TESTPOINTS: Check publish_ack_encode functions*/
zassert_false(rc, "publish_ack_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_ack_decode(&buf, &dec_param);
zassert_false(rc, "publish_ack_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_pubcomp(struct mqtt_test *mqtt_test)
{
struct mqtt_pubcomp_param *param =
(struct mqtt_pubcomp_param *)mqtt_test->ctx;
struct mqtt_pubcomp_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_complete_encode(param, &buf);
/**TESTPOINTS: Check publish_complete_encode functions*/
zassert_false(rc, "publish_complete_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_complete_decode(&buf, &dec_param);
zassert_false(rc, "publish_complete_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_pubrec(struct mqtt_test *mqtt_test)
{
struct mqtt_pubrec_param *param =
(struct mqtt_pubrec_param *)mqtt_test->ctx;
struct mqtt_pubrec_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_receive_encode(param, &buf);
/**TESTPOINTS: Check publish_receive_encode functions*/
zassert_false(rc, "publish_receive_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_receive_decode(&buf, &dec_param);
zassert_false(rc, "publish_receive_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_pubrel(struct mqtt_test *mqtt_test)
{
struct mqtt_pubrel_param *param =
(struct mqtt_pubrel_param *)mqtt_test->ctx;
struct mqtt_pubrel_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_release_encode(param, &buf);
/**TESTPOINTS: Check publish_release_encode functions*/
zassert_false(rc, "publish_release_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_release_decode(&buf, &dec_param);
zassert_false(rc, "publish_release_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_unsuback(struct mqtt_test *mqtt_test)
{
struct mqtt_unsuback_param *param =
(struct mqtt_unsuback_param *)mqtt_test->ctx;
struct mqtt_unsuback_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = mqtt_test->expected;
buf.end = mqtt_test->expected + mqtt_test->expected_len;
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = unsubscribe_ack_decode(&buf, &dec_param);
zassert_false(rc, "unsubscribe_ack_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_max_pkt_len(struct mqtt_test *mqtt_test)
{
struct buf_ctx *buf = (struct buf_ctx *)mqtt_test->ctx;
int rc;
u8_t flags;
u32_t length;
rc = fixed_header_decode(buf, &flags, &length);
zassert_equal(rc, 0, "fixed_header_decode failed");
zassert_equal(length, MQTT_MAX_PAYLOAD_SIZE,
"Invalid packet length decoded");
return TC_PASS;
}
static int eval_corrupted_pkt_len(struct mqtt_test *mqtt_test)
{
struct buf_ctx *buf = (struct buf_ctx *)mqtt_test->ctx;
int rc;
u8_t flags;
u32_t length;
rc = fixed_header_decode(buf, &flags, &length);
zassert_equal(rc, -EINVAL, "fixed_header_decode should fail");
return TC_PASS;
}
void test_mqtt_packet(void)
{
TC_START("MQTT Library test");
int rc;
int i;
mqtt_client_init(&client);
client.protocol_version = MQTT_VERSION_3_1_1;
client.rx_buf = rx_buffer;
client.rx_buf_size = sizeof(rx_buffer);
client.tx_buf = tx_buffer;
client.tx_buf_size = sizeof(tx_buffer);
i = 0;
do {
struct mqtt_test *test = &mqtt_tests[i];
if (test->test_name == NULL) {
break;
}
rc = test->eval_fcn(test);
TC_PRINT("[%s] %d - %s\n", TC_RESULT_TO_STR(rc), i + 1,
test->test_name);
/**TESTPOINT: Check eval_fcn*/
zassert_false(rc, "mqtt_packet test error");
i++;
} while (1);
mqtt_abort(&client);
}
void test_main(void)
{
ztest_test_suite(test_mqtt_packet_fn,
ztest_user_unit_test(test_mqtt_packet));
ztest_run_test_suite(test_mqtt_packet_fn);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3862_1 |
crossvul-cpp_data_bad_4587_3 | /*
* The Python Imaging Library.
* $Id$
*
* decoder for Sgi RLE data.
*
* history:
* 2017-07-28 mb fixed for images larger than 64KB
* 2017-07-20 mb created
*
* Copyright (c) Mickael Bonfill 2017.
*
* See the README file for information on usage and redistribution.
*/
#include "Imaging.h"
#include "Sgi.h"
#define SGI_HEADER_SIZE 512
#define RLE_COPY_FLAG 0x80
#define RLE_MAX_RUN 0x7f
static void read4B(UINT32* dest, UINT8* buf)
{
*dest = (UINT32)((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]);
}
static int expandrow(UINT8* dest, UINT8* src, int n, int z)
{
UINT8 pixel, count;
for (;n > 0; n--)
{
pixel = *src++;
if (n == 1 && pixel != 0)
return n;
count = pixel & RLE_MAX_RUN;
if (!count)
return count;
if (pixel & RLE_COPY_FLAG) {
while(count--) {
*dest = *src++;
dest += z;
}
}
else {
pixel = *src++;
while (count--) {
*dest = pixel;
dest += z;
}
}
}
return 0;
}
static int expandrow2(UINT8* dest, const UINT8* src, int n, int z)
{
UINT8 pixel, count;
for (;n > 0; n--)
{
pixel = src[1];
src+=2;
if (n == 1 && pixel != 0)
return n;
count = pixel & RLE_MAX_RUN;
if (!count)
return count;
if (pixel & RLE_COPY_FLAG) {
while(count--) {
memcpy(dest, src, 2);
src += 2;
dest += z * 2;
}
}
else {
while (count--) {
memcpy(dest, src, 2);
dest += z * 2;
}
src+=2;
}
}
return 0;
}
int
ImagingSgiRleDecode(Imaging im, ImagingCodecState state,
UINT8* buf, Py_ssize_t bytes)
{
UINT8 *ptr;
SGISTATE *c;
int err = 0;
/* Get all data from File descriptor */
c = (SGISTATE*)state->context;
_imaging_seek_pyFd(state->fd, 0L, SEEK_END);
c->bufsize = _imaging_tell_pyFd(state->fd);
c->bufsize -= SGI_HEADER_SIZE;
ptr = malloc(sizeof(UINT8) * c->bufsize);
if (!ptr) {
return IMAGING_CODEC_MEMORY;
}
_imaging_seek_pyFd(state->fd, SGI_HEADER_SIZE, SEEK_SET);
_imaging_read_pyFd(state->fd, (char*)ptr, c->bufsize);
/* decoder initialization */
state->count = 0;
state->y = 0;
if (state->ystep < 0) {
state->y = im->ysize - 1;
} else {
state->ystep = 1;
}
if (im->xsize > INT_MAX / im->bands ||
im->ysize > INT_MAX / im->bands) {
err = IMAGING_CODEC_MEMORY;
goto sgi_finish_decode;
}
/* Allocate memory for RLE tables and rows */
free(state->buffer);
state->buffer = NULL;
/* malloc overflow check above */
state->buffer = calloc(im->xsize * im->bands, sizeof(UINT8) * 2);
c->tablen = im->bands * im->ysize;
c->starttab = calloc(c->tablen, sizeof(UINT32));
c->lengthtab = calloc(c->tablen, sizeof(UINT32));
if (!state->buffer ||
!c->starttab ||
!c->lengthtab) {
err = IMAGING_CODEC_MEMORY;
goto sgi_finish_decode;
}
/* populate offsets table */
for (c->tabindex = 0, c->bufindex = 0; c->tabindex < c->tablen; c->tabindex++, c->bufindex+=4)
read4B(&c->starttab[c->tabindex], &ptr[c->bufindex]);
/* populate lengths table */
for (c->tabindex = 0, c->bufindex = c->tablen * sizeof(UINT32); c->tabindex < c->tablen; c->tabindex++, c->bufindex+=4)
read4B(&c->lengthtab[c->tabindex], &ptr[c->bufindex]);
state->count += c->tablen * sizeof(UINT32) * 2;
/* read compressed rows */
for (c->rowno = 0; c->rowno < im->ysize; c->rowno++, state->y += state->ystep)
{
for (c->channo = 0; c->channo < im->bands; c->channo++)
{
c->rleoffset = c->starttab[c->rowno + c->channo * im->ysize];
c->rlelength = c->lengthtab[c->rowno + c->channo * im->ysize];
c->rleoffset -= SGI_HEADER_SIZE;
if (c->rleoffset + c->rlelength > c->bufsize) {
state->errcode = IMAGING_CODEC_OVERRUN;
return -1;
}
/* row decompression */
if (c->bpc ==1) {
if(expandrow(&state->buffer[c->channo], &ptr[c->rleoffset], c->rlelength, im->bands))
goto sgi_finish_decode;
}
else {
if(expandrow2(&state->buffer[c->channo * 2], &ptr[c->rleoffset], c->rlelength, im->bands))
goto sgi_finish_decode;
}
state->count += c->rlelength;
}
/* store decompressed data in image */
state->shuffle((UINT8*)im->image[state->y], state->buffer, im->xsize);
}
c->bufsize++;
sgi_finish_decode: ;
free(c->starttab);
free(c->lengthtab);
free(ptr);
if (err != 0){
return err;
}
return state->count - c->bufsize;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4587_3 |
crossvul-cpp_data_bad_4403_1 | /* Generated by Cython 0.29.17 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"name": "clickhouse_driver.bufferedreader",
"sources": [
"clickhouse_driver/bufferedreader.pyx"
]
},
"module_name": "clickhouse_driver.bufferedreader"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_17"
#define CYTHON_HEX_VERSION 0x001D11F0
#define CYTHON_FUTURE_DIVISION 1
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__clickhouse_driver__bufferedreader
#define __PYX_HAVE_API__clickhouse_driver__bufferedreader
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "pythread.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"clickhouse_driver/bufferedreader.pyx",
"stringsource",
"type.pxd",
"bool.pxd",
"complex.pxd",
};
/*--- Type declarations ---*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader;
/* "clickhouse_driver/bufferedreader.pyx":10
*
*
* cdef class BufferedReader(object): # <<<<<<<<<<<<<<
* cdef public Py_ssize_t position, current_buffer_size
* cdef public bytearray buffer
*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader {
PyObject_HEAD
Py_ssize_t position;
Py_ssize_t current_buffer_size;
PyObject *buffer;
};
/* "clickhouse_driver/bufferedreader.pyx":180
*
*
* cdef class BufferedSocketReader(BufferedReader): # <<<<<<<<<<<<<<
* cdef object sock
*
*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader __pyx_base;
PyObject *sock;
};
/* "clickhouse_driver/bufferedreader.pyx":194
*
*
* cdef class CompressedBufferedReader(BufferedReader): # <<<<<<<<<<<<<<
* cdef object read_block
*
*/
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader __pyx_base;
PyObject *read_block;
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* GetItemIntByteArray.proto */
#define __Pyx_GetItemInt_ByteArray(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1))
static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i,
int wraparound, int boundscheck);
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* IncludeStringH.proto */
#include <string.h>
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* CallNextTpTraverse.proto */
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse);
/* CallNextTpClear.proto */
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_dealloc);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.version' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.exc' */
/* Module declarations from 'cpython.module' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'cpython.tuple' */
/* Module declarations from 'cpython.list' */
/* Module declarations from 'cpython.sequence' */
/* Module declarations from 'cpython.mapping' */
/* Module declarations from 'cpython.iterator' */
/* Module declarations from 'cpython.number' */
/* Module declarations from 'cpython.int' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.bool' */
static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
/* Module declarations from 'cpython.long' */
/* Module declarations from 'cpython.float' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.complex' */
static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
/* Module declarations from 'cpython.string' */
/* Module declarations from 'cpython.unicode' */
/* Module declarations from 'cpython.dict' */
/* Module declarations from 'cpython.instance' */
/* Module declarations from 'cpython.function' */
/* Module declarations from 'cpython.method' */
/* Module declarations from 'cpython.weakref' */
/* Module declarations from 'cpython.getargs' */
/* Module declarations from 'cpython.pythread' */
/* Module declarations from 'cpython.pystate' */
/* Module declarations from 'cpython.cobject' */
/* Module declarations from 'cpython.oldbuffer' */
/* Module declarations from 'cpython.set' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.bytes' */
/* Module declarations from 'cpython.pycapsule' */
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.bytearray' */
/* Module declarations from 'clickhouse_driver.bufferedreader' */
static PyTypeObject *__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader = 0;
static PyTypeObject *__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader = 0;
static PyTypeObject *__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader = 0;
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *, PyObject *); /*proto*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *, PyObject *); /*proto*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *, PyObject *); /*proto*/
#define __Pyx_MODULE_NAME "clickhouse_driver.bufferedreader"
extern int __pyx_module_is_main_clickhouse_driver__bufferedreader;
int __pyx_module_is_main_clickhouse_driver__bufferedreader = 0;
/* Implementation of 'clickhouse_driver.bufferedreader' */
static PyObject *__pyx_builtin_super;
static PyObject *__pyx_builtin_NotImplementedError;
static PyObject *__pyx_builtin_object;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_UnicodeDecodeError;
static PyObject *__pyx_builtin_EOFError;
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_init[] = "__init__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_sock[] = "sock";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_super[] = "super";
static const char __pyx_k_utf_8[] = "utf-8";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_object[] = "object";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_bufsize[] = "bufsize";
static const char __pyx_k_n_items[] = "n_items";
static const char __pyx_k_EOFError[] = "EOFError";
static const char __pyx_k_encoding[] = "encoding";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_recv_into[] = "recv_into";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_read_block[] = "read_block";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_BufferedReader[] = "BufferedReader";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_read_into_buffer[] = "read_into_buffer";
static const char __pyx_k_UnicodeDecodeError[] = "UnicodeDecodeError";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_NotImplementedError[] = "NotImplementedError";
static const char __pyx_k_BufferedSocketReader[] = "BufferedSocketReader";
static const char __pyx_k_CompressedBufferedReader[] = "CompressedBufferedReader";
static const char __pyx_k_pyx_unpickle_BufferedReader[] = "__pyx_unpickle_BufferedReader";
static const char __pyx_k_pyx_unpickle_BufferedSocketRea[] = "__pyx_unpickle_BufferedSocketReader";
static const char __pyx_k_pyx_unpickle_CompressedBuffere[] = "__pyx_unpickle_CompressedBufferedReader";
static const char __pyx_k_Incompatible_checksums_s_vs_0x18[] = "Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))";
static const char __pyx_k_Incompatible_checksums_s_vs_0x2a[] = "Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))";
static const char __pyx_k_Incompatible_checksums_s_vs_0xef[] = "Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))";
static const char __pyx_k_Unexpected_EOF_while_reading_byt[] = "Unexpected EOF while reading bytes";
static const char __pyx_k_clickhouse_driver_bufferedreader[] = "clickhouse_driver.bufferedreader";
static PyObject *__pyx_n_s_BufferedReader;
static PyObject *__pyx_n_s_BufferedSocketReader;
static PyObject *__pyx_n_s_CompressedBufferedReader;
static PyObject *__pyx_n_s_EOFError;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x18;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x2a;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xef;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_n_s_NotImplementedError;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_kp_u_Unexpected_EOF_while_reading_byt;
static PyObject *__pyx_n_s_UnicodeDecodeError;
static PyObject *__pyx_n_s_bufsize;
static PyObject *__pyx_n_s_clickhouse_driver_bufferedreader;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_encoding;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_init;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_n_items;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_n_s_object;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_BufferedReader;
static PyObject *__pyx_n_s_pyx_unpickle_BufferedSocketRea;
static PyObject *__pyx_n_s_pyx_unpickle_CompressedBuffere;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_read_block;
static PyObject *__pyx_n_s_read_into_buffer;
static PyObject *__pyx_n_s_recv_into;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_sock;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_super;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_kp_u_utf_8;
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_bufsize); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_2read_into_buffer(CYTHON_UNUSED struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_4read(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, Py_ssize_t __pyx_v_unread); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6read_one(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, Py_ssize_t __pyx_v_n_items, PyObject *__pyx_v_encoding); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_4__del__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_10__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_12__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v_sock, PyObject *__pyx_v_bufsize); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v_read_block, PyObject *__pyx_v_bufsize); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_2__pyx_unpickle_BufferedSocketReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_4__pyx_unpickle_CompressedBufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_25411819;
static PyObject *__pyx_int_44607813;
static PyObject *__pyx_int_251251440;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_codeobj__3;
static PyObject *__pyx_codeobj__5;
static PyObject *__pyx_codeobj__7;
/* Late includes */
/* "clickhouse_driver/bufferedreader.pyx":14
* cdef public bytearray buffer
*
* def __init__(self, bufsize): # <<<<<<<<<<<<<<
* self.buffer = bytearray(bufsize)
*
*/
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_bufsize = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bufsize,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bufsize)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 14, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_bufsize = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 14, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader___init__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), __pyx_v_bufsize);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_bufsize) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__init__", 0);
/* "clickhouse_driver/bufferedreader.pyx":15
*
* def __init__(self, bufsize):
* self.buffer = bytearray(bufsize) # <<<<<<<<<<<<<<
*
* self.position = 0
*/
__pyx_t_1 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyByteArray_Type)), __pyx_v_bufsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->buffer);
__Pyx_DECREF(__pyx_v_self->buffer);
__pyx_v_self->buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":17
* self.buffer = bytearray(bufsize)
*
* self.position = 0 # <<<<<<<<<<<<<<
* self.current_buffer_size = 0
*
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":18
*
* self.position = 0
* self.current_buffer_size = 0 # <<<<<<<<<<<<<<
*
* super(BufferedReader, self).__init__()
*/
__pyx_v_self->current_buffer_size = 0;
/* "clickhouse_driver/bufferedreader.pyx":20
* self.current_buffer_size = 0
*
* super(BufferedReader, self).__init__() # <<<<<<<<<<<<<<
*
* def read_into_buffer(self):
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader));
__Pyx_GIVEREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader));
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":14
* cdef public bytearray buffer
*
* def __init__(self, bufsize): # <<<<<<<<<<<<<<
* self.buffer = bytearray(bufsize)
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":22
* super(BufferedReader, self).__init__()
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* raise NotImplementedError
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_2read_into_buffer(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_2read_into_buffer(CYTHON_UNUSED struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer", 0);
/* "clickhouse_driver/bufferedreader.pyx":23
*
* def read_into_buffer(self):
* raise NotImplementedError # <<<<<<<<<<<<<<
*
* def read(self, Py_ssize_t unread):
*/
__Pyx_Raise(__pyx_builtin_NotImplementedError, 0, 0, 0);
__PYX_ERR(0, 23, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":22
* super(BufferedReader, self).__init__()
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* raise NotImplementedError
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_into_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":25
* raise NotImplementedError
*
* def read(self, Py_ssize_t unread): # <<<<<<<<<<<<<<
* # When the buffer is large enough bytes read are almost
* # always hit the buffer.
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_5read(PyObject *__pyx_v_self, PyObject *__pyx_arg_unread); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_5read(PyObject *__pyx_v_self, PyObject *__pyx_arg_unread) {
Py_ssize_t __pyx_v_unread;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read (wrapper)", 0);
assert(__pyx_arg_unread); {
__pyx_v_unread = __Pyx_PyIndex_AsSsize_t(__pyx_arg_unread); if (unlikely((__pyx_v_unread == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_4read(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((Py_ssize_t)__pyx_v_unread));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_4read(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, Py_ssize_t __pyx_v_unread) {
Py_ssize_t __pyx_v_next_position;
Py_ssize_t __pyx_v_t;
char *__pyx_v_buffer_ptr;
Py_ssize_t __pyx_v_read_bytes;
PyObject *__pyx_v_rv = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
__Pyx_RefNannySetupContext("read", 0);
/* "clickhouse_driver/bufferedreader.pyx":28
* # When the buffer is large enough bytes read are almost
* # always hit the buffer.
* cdef Py_ssize_t next_position = unread + self.position # <<<<<<<<<<<<<<
* if next_position < self.current_buffer_size:
* t = self.position
*/
__pyx_v_next_position = (__pyx_v_unread + __pyx_v_self->position);
/* "clickhouse_driver/bufferedreader.pyx":29
* # always hit the buffer.
* cdef Py_ssize_t next_position = unread + self.position
* if next_position < self.current_buffer_size: # <<<<<<<<<<<<<<
* t = self.position
* self.position = next_position
*/
__pyx_t_1 = ((__pyx_v_next_position < __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_1) {
/* "clickhouse_driver/bufferedreader.pyx":30
* cdef Py_ssize_t next_position = unread + self.position
* if next_position < self.current_buffer_size:
* t = self.position # <<<<<<<<<<<<<<
* self.position = next_position
* return bytes(self.buffer[t:self.position])
*/
__pyx_t_2 = __pyx_v_self->position;
__pyx_v_t = __pyx_t_2;
/* "clickhouse_driver/bufferedreader.pyx":31
* if next_position < self.current_buffer_size:
* t = self.position
* self.position = next_position # <<<<<<<<<<<<<<
* return bytes(self.buffer[t:self.position])
*
*/
__pyx_v_self->position = __pyx_v_next_position;
/* "clickhouse_driver/bufferedreader.pyx":32
* t = self.position
* self.position = next_position
* return bytes(self.buffer[t:self.position]) # <<<<<<<<<<<<<<
*
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__Pyx_XDECREF(__pyx_r);
if (unlikely(__pyx_v_self->buffer == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 32, __pyx_L1_error)
}
__pyx_t_3 = PySequence_GetSlice(__pyx_v_self->buffer, __pyx_v_t, __pyx_v_self->position); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":29
* # always hit the buffer.
* cdef Py_ssize_t next_position = unread + self.position
* if next_position < self.current_buffer_size: # <<<<<<<<<<<<<<
* t = self.position
* self.position = next_position
*/
}
/* "clickhouse_driver/bufferedreader.pyx":34
* return bytes(self.buffer[t:self.position])
*
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* cdef Py_ssize_t read_bytes
* rv = bytes()
*/
__pyx_t_4 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_4);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_4);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":36
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer)
* cdef Py_ssize_t read_bytes
* rv = bytes() # <<<<<<<<<<<<<<
*
* while unread > 0:
*/
__pyx_t_4 = __Pyx_PyObject_CallNoArg(((PyObject *)(&PyBytes_Type))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_v_rv = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":38
* rv = bytes()
*
* while unread > 0: # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
while (1) {
__pyx_t_1 = ((__pyx_v_unread > 0) != 0);
if (!__pyx_t_1) break;
/* "clickhouse_driver/bufferedreader.pyx":39
*
* while unread > 0:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__pyx_t_1 = ((__pyx_v_self->position == __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_1) {
/* "clickhouse_driver/bufferedreader.pyx":40
* while unread > 0:
* if self.position == self.current_buffer_size:
* self.read_into_buffer() # <<<<<<<<<<<<<<
* buffer_ptr = PyByteArray_AsString(self.buffer)
* self.position = 0
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_4 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":41
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* self.position = 0
*
*/
__pyx_t_4 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_4);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_4);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":42
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer)
* self.position = 0 # <<<<<<<<<<<<<<
*
* read_bytes = min(unread, self.current_buffer_size - self.position)
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":39
*
* while unread > 0:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
}
/* "clickhouse_driver/bufferedreader.pyx":44
* self.position = 0
*
* read_bytes = min(unread, self.current_buffer_size - self.position) # <<<<<<<<<<<<<<
* rv += PyBytes_FromStringAndSize(
* &buffer_ptr[self.position], read_bytes
*/
__pyx_t_2 = (__pyx_v_self->current_buffer_size - __pyx_v_self->position);
__pyx_t_6 = __pyx_v_unread;
if (((__pyx_t_2 < __pyx_t_6) != 0)) {
__pyx_t_7 = __pyx_t_2;
} else {
__pyx_t_7 = __pyx_t_6;
}
__pyx_v_read_bytes = __pyx_t_7;
/* "clickhouse_driver/bufferedreader.pyx":45
*
* read_bytes = min(unread, self.current_buffer_size - self.position)
* rv += PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* &buffer_ptr[self.position], read_bytes
* )
*/
__pyx_t_4 = PyBytes_FromStringAndSize((&(__pyx_v_buffer_ptr[__pyx_v_self->position])), __pyx_v_read_bytes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_rv, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF_SET(__pyx_v_rv, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "clickhouse_driver/bufferedreader.pyx":48
* &buffer_ptr[self.position], read_bytes
* )
* self.position += read_bytes # <<<<<<<<<<<<<<
* unread -= read_bytes
*
*/
__pyx_v_self->position = (__pyx_v_self->position + __pyx_v_read_bytes);
/* "clickhouse_driver/bufferedreader.pyx":49
* )
* self.position += read_bytes
* unread -= read_bytes # <<<<<<<<<<<<<<
*
* return rv
*/
__pyx_v_unread = (__pyx_v_unread - __pyx_v_read_bytes);
}
/* "clickhouse_driver/bufferedreader.pyx":51
* unread -= read_bytes
*
* return rv # <<<<<<<<<<<<<<
*
* def read_one(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_rv);
__pyx_r = __pyx_v_rv;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":25
* raise NotImplementedError
*
* def read(self, Py_ssize_t unread): # <<<<<<<<<<<<<<
* # When the buffer is large enough bytes read are almost
* # always hit the buffer.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_rv);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":53
* return rv
*
* def read_one(self): # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_7read_one(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_7read_one(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_one (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6read_one(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6read_one(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
unsigned char __pyx_v_rv;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
__Pyx_RefNannySetupContext("read_one", 0);
/* "clickhouse_driver/bufferedreader.pyx":54
*
* def read_one(self):
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* self.position = 0
*/
__pyx_t_1 = ((__pyx_v_self->position == __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_1) {
/* "clickhouse_driver/bufferedreader.pyx":55
* def read_one(self):
* if self.position == self.current_buffer_size:
* self.read_into_buffer() # <<<<<<<<<<<<<<
* self.position = 0
*
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "clickhouse_driver/bufferedreader.pyx":56
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
* self.position = 0 # <<<<<<<<<<<<<<
*
* rv = self.buffer[self.position]
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":54
*
* def read_one(self):
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* self.position = 0
*/
}
/* "clickhouse_driver/bufferedreader.pyx":58
* self.position = 0
*
* rv = self.buffer[self.position] # <<<<<<<<<<<<<<
* self.position += 1
* return rv
*/
__pyx_t_5 = __Pyx_GetItemInt_ByteArray(__pyx_v_self->buffer, __pyx_v_self->position, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_rv = __pyx_t_5;
/* "clickhouse_driver/bufferedreader.pyx":59
*
* rv = self.buffer[self.position]
* self.position += 1 # <<<<<<<<<<<<<<
* return rv
*
*/
__pyx_v_self->position = (__pyx_v_self->position + 1);
/* "clickhouse_driver/bufferedreader.pyx":60
* rv = self.buffer[self.position]
* self.position += 1
* return rv # <<<<<<<<<<<<<<
*
* def read_strings(self, Py_ssize_t n_items, encoding=None):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_unsigned_char(__pyx_v_rv); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":53
* return rv
*
* def read_one(self): # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_one", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":62
* return rv
*
* def read_strings(self, Py_ssize_t n_items, encoding=None): # <<<<<<<<<<<<<<
* """
* Python has great overhead between function calls.
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_9read_strings(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings[] = "\n Python has great overhead between function calls.\n We inline strings reading logic here to avoid this overhead.\n ";
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_9read_strings(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
Py_ssize_t __pyx_v_n_items;
PyObject *__pyx_v_encoding = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_strings (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_n_items,&__pyx_n_s_encoding,0};
PyObject* values[2] = {0,0};
values[1] = ((PyObject *)Py_None);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n_items)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_encoding);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "read_strings") < 0)) __PYX_ERR(0, 62, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_n_items = __Pyx_PyIndex_AsSsize_t(values[0]); if (unlikely((__pyx_v_n_items == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 62, __pyx_L3_error)
__pyx_v_encoding = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("read_strings", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 62, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_strings", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), __pyx_v_n_items, __pyx_v_encoding);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, Py_ssize_t __pyx_v_n_items, PyObject *__pyx_v_encoding) {
PyObject *__pyx_v_items = NULL;
Py_ssize_t __pyx_v_i;
char *__pyx_v_buffer_ptr;
Py_ssize_t __pyx_v_right;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_v_shift;
Py_ssize_t __pyx_v_bytes_read;
unsigned char __pyx_v_b;
char *__pyx_v_c_string;
Py_ssize_t __pyx_v_c_string_size;
char *__pyx_v_c_encoding;
PyObject *__pyx_v_rv = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
char *__pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
int __pyx_t_15;
PyObject *__pyx_t_16 = NULL;
__Pyx_RefNannySetupContext("read_strings", 0);
__Pyx_INCREF(__pyx_v_encoding);
/* "clickhouse_driver/bufferedreader.pyx":67
* We inline strings reading logic here to avoid this overhead.
* """
* items = PyTuple_New(n_items) # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t i
*/
__pyx_t_1 = PyTuple_New(__pyx_v_n_items); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_items = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":71
* cdef Py_ssize_t i
* # Buffer vars
* cdef char* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* cdef Py_ssize_t right
* # String length vars
*/
__pyx_t_1 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_1);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":78
*
* # String for decode vars.
* cdef char *c_string = NULL # <<<<<<<<<<<<<<
* cdef Py_ssize_t c_string_size = 1024
* cdef char *c_encoding = NULL
*/
__pyx_v_c_string = NULL;
/* "clickhouse_driver/bufferedreader.pyx":79
* # String for decode vars.
* cdef char *c_string = NULL
* cdef Py_ssize_t c_string_size = 1024 # <<<<<<<<<<<<<<
* cdef char *c_encoding = NULL
* if encoding:
*/
__pyx_v_c_string_size = 0x400;
/* "clickhouse_driver/bufferedreader.pyx":80
* cdef char *c_string = NULL
* cdef Py_ssize_t c_string_size = 1024
* cdef char *c_encoding = NULL # <<<<<<<<<<<<<<
* if encoding:
* encoding = encoding.encode('utf-8')
*/
__pyx_v_c_encoding = NULL;
/* "clickhouse_driver/bufferedreader.pyx":81
* cdef Py_ssize_t c_string_size = 1024
* cdef char *c_encoding = NULL
* if encoding: # <<<<<<<<<<<<<<
* encoding = encoding.encode('utf-8')
* c_encoding = encoding
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_encoding); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":82
* cdef char *c_encoding = NULL
* if encoding:
* encoding = encoding.encode('utf-8') # <<<<<<<<<<<<<<
* c_encoding = encoding
*
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_encoding, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_kp_u_utf_8) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_kp_u_utf_8);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_encoding, __pyx_t_1);
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":83
* if encoding:
* encoding = encoding.encode('utf-8')
* c_encoding = encoding # <<<<<<<<<<<<<<
*
* cdef object rv = object()
*/
__pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_encoding); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 83, __pyx_L1_error)
__pyx_v_c_encoding = __pyx_t_5;
/* "clickhouse_driver/bufferedreader.pyx":81
* cdef Py_ssize_t c_string_size = 1024
* cdef char *c_encoding = NULL
* if encoding: # <<<<<<<<<<<<<<
* encoding = encoding.encode('utf-8')
* c_encoding = encoding
*/
}
/* "clickhouse_driver/bufferedreader.pyx":85
* c_encoding = encoding
*
* cdef object rv = object() # <<<<<<<<<<<<<<
* # String for decode vars.
* if c_encoding:
*/
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_builtin_object); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_rv = __pyx_t_1;
__pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":87
* cdef object rv = object()
* # String for decode vars.
* if c_encoding: # <<<<<<<<<<<<<<
* c_string = <char *> PyMem_Realloc(NULL, c_string_size)
*
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":88
* # String for decode vars.
* if c_encoding:
* c_string = <char *> PyMem_Realloc(NULL, c_string_size) # <<<<<<<<<<<<<<
*
* for i in range(n_items):
*/
__pyx_v_c_string = ((char *)PyMem_Realloc(NULL, __pyx_v_c_string_size));
/* "clickhouse_driver/bufferedreader.pyx":87
* cdef object rv = object()
* # String for decode vars.
* if c_encoding: # <<<<<<<<<<<<<<
* c_string = <char *> PyMem_Realloc(NULL, c_string_size)
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":90
* c_string = <char *> PyMem_Realloc(NULL, c_string_size)
*
* for i in range(n_items): # <<<<<<<<<<<<<<
* shift = size = 0
*
*/
__pyx_t_6 = __pyx_v_n_items;
__pyx_t_7 = __pyx_t_6;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "clickhouse_driver/bufferedreader.pyx":91
*
* for i in range(n_items):
* shift = size = 0 # <<<<<<<<<<<<<<
*
* # Read string size
*/
__pyx_v_shift = 0;
__pyx_v_size = 0;
/* "clickhouse_driver/bufferedreader.pyx":94
*
* # Read string size
* while True: # <<<<<<<<<<<<<<
* if self.position == self.current_buffer_size:
* self.read_into_buffer()
*/
while (1) {
/* "clickhouse_driver/bufferedreader.pyx":95
* # Read string size
* while True:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
*/
__pyx_t_2 = ((__pyx_v_self->position == __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":96
* while True:
* if self.position == self.current_buffer_size:
* self.read_into_buffer() # <<<<<<<<<<<<<<
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":98
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* self.position = 0
*
*/
__pyx_t_1 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_1);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":99
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer)
* self.position = 0 # <<<<<<<<<<<<<<
*
* b = buffer_ptr[self.position]
*/
__pyx_v_self->position = 0;
/* "clickhouse_driver/bufferedreader.pyx":95
* # Read string size
* while True:
* if self.position == self.current_buffer_size: # <<<<<<<<<<<<<<
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
*/
}
/* "clickhouse_driver/bufferedreader.pyx":101
* self.position = 0
*
* b = buffer_ptr[self.position] # <<<<<<<<<<<<<<
* self.position += 1
*
*/
__pyx_v_b = (__pyx_v_buffer_ptr[__pyx_v_self->position]);
/* "clickhouse_driver/bufferedreader.pyx":102
*
* b = buffer_ptr[self.position]
* self.position += 1 # <<<<<<<<<<<<<<
*
* size |= (b & 0x7f) << shift
*/
__pyx_v_self->position = (__pyx_v_self->position + 1);
/* "clickhouse_driver/bufferedreader.pyx":104
* self.position += 1
*
* size |= (b & 0x7f) << shift # <<<<<<<<<<<<<<
* if b < 0x80:
* break
*/
__pyx_v_size = (__pyx_v_size | ((__pyx_v_b & 0x7f) << __pyx_v_shift));
/* "clickhouse_driver/bufferedreader.pyx":105
*
* size |= (b & 0x7f) << shift
* if b < 0x80: # <<<<<<<<<<<<<<
* break
*
*/
__pyx_t_2 = ((__pyx_v_b < 0x80) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":106
* size |= (b & 0x7f) << shift
* if b < 0x80:
* break # <<<<<<<<<<<<<<
*
* shift += 7
*/
goto __pyx_L8_break;
/* "clickhouse_driver/bufferedreader.pyx":105
*
* size |= (b & 0x7f) << shift
* if b < 0x80: # <<<<<<<<<<<<<<
* break
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":108
* break
*
* shift += 7 # <<<<<<<<<<<<<<
*
* right = self.position + size
*/
__pyx_v_shift = (__pyx_v_shift + 7);
}
__pyx_L8_break:;
/* "clickhouse_driver/bufferedreader.pyx":110
* shift += 7
*
* right = self.position + size # <<<<<<<<<<<<<<
*
* if c_encoding:
*/
__pyx_v_right = (__pyx_v_self->position + __pyx_v_size);
/* "clickhouse_driver/bufferedreader.pyx":112
* right = self.position + size
*
* if c_encoding: # <<<<<<<<<<<<<<
* if size + 1 > c_string_size:
* c_string_size = size + 1
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":113
*
* if c_encoding:
* if size + 1 > c_string_size: # <<<<<<<<<<<<<<
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
*/
__pyx_t_2 = (((__pyx_v_size + 1) > __pyx_v_c_string_size) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":114
* if c_encoding:
* if size + 1 > c_string_size:
* c_string_size = size + 1 # <<<<<<<<<<<<<<
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL:
*/
__pyx_v_c_string_size = (__pyx_v_size + 1);
/* "clickhouse_driver/bufferedreader.pyx":115
* if size + 1 > c_string_size:
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size) # <<<<<<<<<<<<<<
* if c_string is NULL:
* raise MemoryError()
*/
__pyx_v_c_string = ((char *)PyMem_Realloc(__pyx_v_c_string, __pyx_v_c_string_size));
/* "clickhouse_driver/bufferedreader.pyx":116
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL: # <<<<<<<<<<<<<<
* raise MemoryError()
* c_string[size] = 0
*/
__pyx_t_2 = ((__pyx_v_c_string == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "clickhouse_driver/bufferedreader.pyx":117
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL:
* raise MemoryError() # <<<<<<<<<<<<<<
* c_string[size] = 0
* bytes_read = 0
*/
PyErr_NoMemory(); __PYX_ERR(0, 117, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":116
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
* if c_string is NULL: # <<<<<<<<<<<<<<
* raise MemoryError()
* c_string[size] = 0
*/
}
/* "clickhouse_driver/bufferedreader.pyx":113
*
* if c_encoding:
* if size + 1 > c_string_size: # <<<<<<<<<<<<<<
* c_string_size = size + 1
* c_string = <char *> PyMem_Realloc(c_string, c_string_size)
*/
}
/* "clickhouse_driver/bufferedreader.pyx":118
* if c_string is NULL:
* raise MemoryError()
* c_string[size] = 0 # <<<<<<<<<<<<<<
* bytes_read = 0
*
*/
(__pyx_v_c_string[__pyx_v_size]) = 0;
/* "clickhouse_driver/bufferedreader.pyx":119
* raise MemoryError()
* c_string[size] = 0
* bytes_read = 0 # <<<<<<<<<<<<<<
*
* # Decoding pure c strings in Cython is faster than in pure Python.
*/
__pyx_v_bytes_read = 0;
/* "clickhouse_driver/bufferedreader.pyx":112
* right = self.position + size
*
* if c_encoding: # <<<<<<<<<<<<<<
* if size + 1 > c_string_size:
* c_string_size = size + 1
*/
}
/* "clickhouse_driver/bufferedreader.pyx":124
* # We need to copy it into buffer for adding null symbol at the end.
* # In ClickHouse block there is no null
* if right > self.current_buffer_size: # <<<<<<<<<<<<<<
* if c_encoding:
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
*/
__pyx_t_2 = ((__pyx_v_right > __pyx_v_self->current_buffer_size) != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":125
* # In ClickHouse block there is no null
* if right > self.current_buffer_size:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
* self.current_buffer_size - self.position)
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":126
* if right > self.current_buffer_size:
* if c_encoding:
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position], # <<<<<<<<<<<<<<
* self.current_buffer_size - self.position)
* else:
*/
(void)(memcpy((&(__pyx_v_c_string[__pyx_v_bytes_read])), (&(__pyx_v_buffer_ptr[__pyx_v_self->position])), (__pyx_v_self->current_buffer_size - __pyx_v_self->position)));
/* "clickhouse_driver/bufferedreader.pyx":125
* # In ClickHouse block there is no null
* if right > self.current_buffer_size:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
* self.current_buffer_size - self.position)
*/
goto __pyx_L15;
}
/* "clickhouse_driver/bufferedreader.pyx":129
* self.current_buffer_size - self.position)
* else:
* rv = PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* &buffer_ptr[self.position],
* self.current_buffer_size - self.position
*/
/*else*/ {
/* "clickhouse_driver/bufferedreader.pyx":131
* rv = PyBytes_FromStringAndSize(
* &buffer_ptr[self.position],
* self.current_buffer_size - self.position # <<<<<<<<<<<<<<
* )
*
*/
__pyx_t_1 = PyBytes_FromStringAndSize((&(__pyx_v_buffer_ptr[__pyx_v_self->position])), (__pyx_v_self->current_buffer_size - __pyx_v_self->position)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_1);
__pyx_t_1 = 0;
}
__pyx_L15:;
/* "clickhouse_driver/bufferedreader.pyx":134
* )
*
* bytes_read = self.current_buffer_size - self.position # <<<<<<<<<<<<<<
* # Read the rest of the string.
* while bytes_read != size:
*/
__pyx_v_bytes_read = (__pyx_v_self->current_buffer_size - __pyx_v_self->position);
/* "clickhouse_driver/bufferedreader.pyx":136
* bytes_read = self.current_buffer_size - self.position
* # Read the rest of the string.
* while bytes_read != size: # <<<<<<<<<<<<<<
* self.position = size - bytes_read
*
*/
while (1) {
__pyx_t_2 = ((__pyx_v_bytes_read != __pyx_v_size) != 0);
if (!__pyx_t_2) break;
/* "clickhouse_driver/bufferedreader.pyx":137
* # Read the rest of the string.
* while bytes_read != size:
* self.position = size - bytes_read # <<<<<<<<<<<<<<
*
* self.read_into_buffer()
*/
__pyx_v_self->position = (__pyx_v_size - __pyx_v_bytes_read);
/* "clickhouse_driver/bufferedreader.pyx":139
* self.position = size - bytes_read
*
* self.read_into_buffer() # <<<<<<<<<<<<<<
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer)
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_read_into_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":141
* self.read_into_buffer()
* # `read_into_buffer` can override buffer
* buffer_ptr = PyByteArray_AsString(self.buffer) # <<<<<<<<<<<<<<
* # There can be not enough data in buffer.
* self.position = min(
*/
__pyx_t_1 = __pyx_v_self->buffer;
__Pyx_INCREF(__pyx_t_1);
__pyx_v_buffer_ptr = PyByteArray_AsString(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":144
* # There can be not enough data in buffer.
* self.position = min(
* self.position, self.current_buffer_size # <<<<<<<<<<<<<<
* )
* if c_encoding:
*/
__pyx_t_9 = __pyx_v_self->current_buffer_size;
__pyx_t_10 = __pyx_v_self->position;
if (((__pyx_t_9 < __pyx_t_10) != 0)) {
__pyx_t_11 = __pyx_t_9;
} else {
__pyx_t_11 = __pyx_t_10;
}
/* "clickhouse_driver/bufferedreader.pyx":143
* buffer_ptr = PyByteArray_AsString(self.buffer)
* # There can be not enough data in buffer.
* self.position = min( # <<<<<<<<<<<<<<
* self.position, self.current_buffer_size
* )
*/
__pyx_v_self->position = __pyx_t_11;
/* "clickhouse_driver/bufferedreader.pyx":146
* self.position, self.current_buffer_size
* )
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(
* &c_string[bytes_read], buffer_ptr, self.position
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":147
* )
* if c_encoding:
* memcpy( # <<<<<<<<<<<<<<
* &c_string[bytes_read], buffer_ptr, self.position
* )
*/
(void)(memcpy((&(__pyx_v_c_string[__pyx_v_bytes_read])), __pyx_v_buffer_ptr, __pyx_v_self->position));
/* "clickhouse_driver/bufferedreader.pyx":146
* self.position, self.current_buffer_size
* )
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(
* &c_string[bytes_read], buffer_ptr, self.position
*/
goto __pyx_L18;
}
/* "clickhouse_driver/bufferedreader.pyx":151
* )
* else:
* rv += PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* buffer_ptr, self.position
* )
*/
/*else*/ {
/* "clickhouse_driver/bufferedreader.pyx":152
* else:
* rv += PyBytes_FromStringAndSize(
* buffer_ptr, self.position # <<<<<<<<<<<<<<
* )
* bytes_read += self.position
*/
__pyx_t_1 = PyBytes_FromStringAndSize(__pyx_v_buffer_ptr, __pyx_v_self->position); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* "clickhouse_driver/bufferedreader.pyx":151
* )
* else:
* rv += PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* buffer_ptr, self.position
* )
*/
__pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_rv, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_3);
__pyx_t_3 = 0;
}
__pyx_L18:;
/* "clickhouse_driver/bufferedreader.pyx":154
* buffer_ptr, self.position
* )
* bytes_read += self.position # <<<<<<<<<<<<<<
*
* else:
*/
__pyx_v_bytes_read = (__pyx_v_bytes_read + __pyx_v_self->position);
}
/* "clickhouse_driver/bufferedreader.pyx":124
* # We need to copy it into buffer for adding null symbol at the end.
* # In ClickHouse block there is no null
* if right > self.current_buffer_size: # <<<<<<<<<<<<<<
* if c_encoding:
* memcpy(&c_string[bytes_read], &buffer_ptr[self.position],
*/
goto __pyx_L14;
}
/* "clickhouse_driver/bufferedreader.pyx":157
*
* else:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(c_string, &buffer_ptr[self.position], size)
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":158
* else:
* if c_encoding:
* memcpy(c_string, &buffer_ptr[self.position], size) # <<<<<<<<<<<<<<
* else:
* rv = PyBytes_FromStringAndSize(
*/
(void)(memcpy(__pyx_v_c_string, (&(__pyx_v_buffer_ptr[__pyx_v_self->position])), __pyx_v_size));
/* "clickhouse_driver/bufferedreader.pyx":157
*
* else:
* if c_encoding: # <<<<<<<<<<<<<<
* memcpy(c_string, &buffer_ptr[self.position], size)
* else:
*/
goto __pyx_L19;
}
/* "clickhouse_driver/bufferedreader.pyx":160
* memcpy(c_string, &buffer_ptr[self.position], size)
* else:
* rv = PyBytes_FromStringAndSize( # <<<<<<<<<<<<<<
* &buffer_ptr[self.position], size
* )
*/
/*else*/ {
/* "clickhouse_driver/bufferedreader.pyx":161
* else:
* rv = PyBytes_FromStringAndSize(
* &buffer_ptr[self.position], size # <<<<<<<<<<<<<<
* )
* self.position = right
*/
__pyx_t_3 = PyBytes_FromStringAndSize((&(__pyx_v_buffer_ptr[__pyx_v_self->position])), __pyx_v_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_3);
__pyx_t_3 = 0;
}
__pyx_L19:;
/* "clickhouse_driver/bufferedreader.pyx":163
* &buffer_ptr[self.position], size
* )
* self.position = right # <<<<<<<<<<<<<<
*
* if c_encoding:
*/
__pyx_v_self->position = __pyx_v_right;
}
__pyx_L14:;
/* "clickhouse_driver/bufferedreader.pyx":165
* self.position = right
*
* if c_encoding: # <<<<<<<<<<<<<<
* try:
* rv = c_string[:size].decode(c_encoding)
*/
__pyx_t_2 = (__pyx_v_c_encoding != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":166
*
* if c_encoding:
* try: # <<<<<<<<<<<<<<
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
__Pyx_XGOTREF(__pyx_t_12);
__Pyx_XGOTREF(__pyx_t_13);
__Pyx_XGOTREF(__pyx_t_14);
/*try:*/ {
/* "clickhouse_driver/bufferedreader.pyx":167
* if c_encoding:
* try:
* rv = c_string[:size].decode(c_encoding) # <<<<<<<<<<<<<<
* except UnicodeDecodeError:
* rv = PyBytes_FromStringAndSize(c_string, size)
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_c_string, 0, __pyx_v_size, __pyx_v_c_encoding, NULL, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L21_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_3);
__pyx_t_3 = 0;
/* "clickhouse_driver/bufferedreader.pyx":166
*
* if c_encoding:
* try: # <<<<<<<<<<<<<<
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
*/
}
__Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
goto __pyx_L28_try_end;
__pyx_L21_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "clickhouse_driver/bufferedreader.pyx":168
* try:
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError: # <<<<<<<<<<<<<<
* rv = PyBytes_FromStringAndSize(c_string, size)
*
*/
__pyx_t_15 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_UnicodeDecodeError);
if (__pyx_t_15) {
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_strings", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_4) < 0) __PYX_ERR(0, 168, __pyx_L23_except_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_4);
/* "clickhouse_driver/bufferedreader.pyx":169
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
* rv = PyBytes_FromStringAndSize(c_string, size) # <<<<<<<<<<<<<<
*
* Py_INCREF(rv)
*/
__pyx_t_16 = PyBytes_FromStringAndSize(__pyx_v_c_string, __pyx_v_size); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 169, __pyx_L23_except_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF_SET(__pyx_v_rv, __pyx_t_16);
__pyx_t_16 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L22_exception_handled;
}
goto __pyx_L23_except_error;
__pyx_L23_except_error:;
/* "clickhouse_driver/bufferedreader.pyx":166
*
* if c_encoding:
* try: # <<<<<<<<<<<<<<
* rv = c_string[:size].decode(c_encoding)
* except UnicodeDecodeError:
*/
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_XGIVEREF(__pyx_t_13);
__Pyx_XGIVEREF(__pyx_t_14);
__Pyx_ExceptionReset(__pyx_t_12, __pyx_t_13, __pyx_t_14);
goto __pyx_L1_error;
__pyx_L22_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_XGIVEREF(__pyx_t_13);
__Pyx_XGIVEREF(__pyx_t_14);
__Pyx_ExceptionReset(__pyx_t_12, __pyx_t_13, __pyx_t_14);
__pyx_L28_try_end:;
}
/* "clickhouse_driver/bufferedreader.pyx":165
* self.position = right
*
* if c_encoding: # <<<<<<<<<<<<<<
* try:
* rv = c_string[:size].decode(c_encoding)
*/
}
/* "clickhouse_driver/bufferedreader.pyx":171
* rv = PyBytes_FromStringAndSize(c_string, size)
*
* Py_INCREF(rv) # <<<<<<<<<<<<<<
* PyTuple_SET_ITEM(items, i, rv)
*
*/
Py_INCREF(__pyx_v_rv);
/* "clickhouse_driver/bufferedreader.pyx":172
*
* Py_INCREF(rv)
* PyTuple_SET_ITEM(items, i, rv) # <<<<<<<<<<<<<<
*
* if c_string:
*/
PyTuple_SET_ITEM(__pyx_v_items, __pyx_v_i, __pyx_v_rv);
}
/* "clickhouse_driver/bufferedreader.pyx":174
* PyTuple_SET_ITEM(items, i, rv)
*
* if c_string: # <<<<<<<<<<<<<<
* PyMem_Free(c_string)
*
*/
__pyx_t_2 = (__pyx_v_c_string != 0);
if (__pyx_t_2) {
/* "clickhouse_driver/bufferedreader.pyx":175
*
* if c_string:
* PyMem_Free(c_string) # <<<<<<<<<<<<<<
*
* return items
*/
PyMem_Free(__pyx_v_c_string);
/* "clickhouse_driver/bufferedreader.pyx":174
* PyTuple_SET_ITEM(items, i, rv)
*
* if c_string: # <<<<<<<<<<<<<<
* PyMem_Free(c_string)
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":177
* PyMem_Free(c_string)
*
* return items # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_items);
__pyx_r = __pyx_v_items;
goto __pyx_L0;
/* "clickhouse_driver/bufferedreader.pyx":62
* return rv
*
* def read_strings(self, Py_ssize_t n_items, encoding=None): # <<<<<<<<<<<<<<
* """
* Python has great overhead between function calls.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_16);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.read_strings", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_items);
__Pyx_XDECREF(__pyx_v_rv);
__Pyx_XDECREF(__pyx_v_encoding);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":11
*
* cdef class BufferedReader(object):
* cdef public Py_ssize_t position, current_buffer_size # <<<<<<<<<<<<<<
* cdef public bytearray buffer
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->position); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.position.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_2__set__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __Pyx_PyIndex_AsSsize_t(__pyx_v_value); if (unlikely((__pyx_t_1 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L1_error)
__pyx_v_self->position = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.position.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size___get__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.current_buffer_size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_2__set__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __Pyx_PyIndex_AsSsize_t(__pyx_v_value); if (unlikely((__pyx_t_1 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L1_error)
__pyx_v_self->current_buffer_size = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.current_buffer_size.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":12
* cdef class BufferedReader(object):
* cdef public Py_ssize_t position, current_buffer_size
* cdef public bytearray buffer # <<<<<<<<<<<<<<
*
* def __init__(self, bufsize):
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer___get__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer___get__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->buffer);
__pyx_r = __pyx_v_self->buffer;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_2__set__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_2__set__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(PyByteArray_CheckExact(__pyx_v_value))||((__pyx_v_value) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_v_value)->tp_name), 0))) __PYX_ERR(0, 12, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->buffer);
__Pyx_DECREF(__pyx_v_self->buffer);
__pyx_v_self->buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.buffer.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_5__del__(PyObject *__pyx_v_self); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_4__del__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_4__del__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->buffer);
__Pyx_DECREF(__pyx_v_self->buffer);
__pyx_v_self->buffer = ((PyObject*)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_10__reduce_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_10__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->position); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_self->buffer);
__Pyx_GIVEREF(__pyx_v_self->buffer);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->buffer);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_3 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v__dict = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_4 = (__pyx_v__dict != Py_None);
__pyx_t_5 = (__pyx_t_4 != 0);
if (__pyx_t_5) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v__dict);
__pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.buffer is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.buffer is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
*/
/*else*/ {
__pyx_t_5 = (__pyx_v_self->buffer != ((PyObject*)Py_None));
__pyx_v_use_setstate = __pyx_t_5;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
* else:
*/
__pyx_t_5 = (__pyx_v_use_setstate != 0);
if (__pyx_t_5) {
/* "(tree fragment)":13
* use_setstate = self.buffer is not None
* if use_setstate:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_pyx_unpickle_BufferedReader); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_44607813);
__Pyx_GIVEREF(__pyx_int_44607813);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_44607813);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, None), state
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_BufferedReader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_44607813);
__Pyx_GIVEREF(__pyx_int_44607813);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_44607813);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_12__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_14BufferedReader_12__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedReader, (type(self), 0x2a8a945, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedReader.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":183
* cdef object sock
*
* def __init__(self, sock, bufsize): # <<<<<<<<<<<<<<
* self.sock = sock
* super(BufferedSocketReader, self).__init__(bufsize)
*/
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_sock = 0;
PyObject *__pyx_v_bufsize = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sock,&__pyx_n_s_bufsize,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sock)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bufsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 183, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 183, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_sock = values[0];
__pyx_v_bufsize = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 183, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader___init__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self), __pyx_v_sock, __pyx_v_bufsize);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v_sock, PyObject *__pyx_v_bufsize) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__init__", 0);
/* "clickhouse_driver/bufferedreader.pyx":184
*
* def __init__(self, sock, bufsize):
* self.sock = sock # <<<<<<<<<<<<<<
* super(BufferedSocketReader, self).__init__(bufsize)
*
*/
__Pyx_INCREF(__pyx_v_sock);
__Pyx_GIVEREF(__pyx_v_sock);
__Pyx_GOTREF(__pyx_v_self->sock);
__Pyx_DECREF(__pyx_v_self->sock);
__pyx_v_self->sock = __pyx_v_sock;
/* "clickhouse_driver/bufferedreader.pyx":185
* def __init__(self, sock, bufsize):
* self.sock = sock
* super(BufferedSocketReader, self).__init__(bufsize) # <<<<<<<<<<<<<<
*
* def read_into_buffer(self):
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader));
__Pyx_GIVEREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader));
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_bufsize) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_bufsize);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":183
* cdef object sock
*
* def __init__(self, sock, bufsize): # <<<<<<<<<<<<<<
* self.sock = sock
* super(BufferedSocketReader, self).__init__(bufsize)
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":187
* super(BufferedSocketReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_2read_into_buffer(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
__Pyx_RefNannySetupContext("read_into_buffer", 0);
/* "clickhouse_driver/bufferedreader.pyx":188
*
* def read_into_buffer(self):
* self.current_buffer_size = self.sock.recv_into(self.buffer) # <<<<<<<<<<<<<<
*
* if self.current_buffer_size == 0:
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->sock, __pyx_n_s_recv_into); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_self->__pyx_base.buffer) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_self->__pyx_base.buffer);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_4 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_4 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_self->__pyx_base.current_buffer_size = __pyx_t_4;
/* "clickhouse_driver/bufferedreader.pyx":190
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*
*/
__pyx_t_5 = ((__pyx_v_self->__pyx_base.current_buffer_size == 0) != 0);
if (unlikely(__pyx_t_5)) {
/* "clickhouse_driver/bufferedreader.pyx":191
*
* if self.current_buffer_size == 0:
* raise EOFError('Unexpected EOF while reading bytes') # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_EOFError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 191, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(0, 191, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":190
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*
*/
}
/* "clickhouse_driver/bufferedreader.pyx":187
* super(BufferedSocketReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.current_buffer_size = self.sock.recv_into(self.buffer)
*
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.read_into_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_4__reduce_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.sock) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->__pyx_base.current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->__pyx_base.position); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_v_self->__pyx_base.buffer);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_INCREF(__pyx_v_self->sock);
__Pyx_GIVEREF(__pyx_v_self->sock);
PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_self->sock);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.sock)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_3 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v__dict = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.sock)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_4 = (__pyx_v__dict != Py_None);
__pyx_t_5 = (__pyx_t_4 != 0);
if (__pyx_t_5) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v__dict);
__pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.buffer is not None or self.sock is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.sock)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.buffer is not None or self.sock is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
*/
/*else*/ {
__pyx_t_4 = (__pyx_v_self->__pyx_base.buffer != ((PyObject*)Py_None));
__pyx_t_6 = (__pyx_t_4 != 0);
if (!__pyx_t_6) {
} else {
__pyx_t_5 = __pyx_t_6;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_6 = (__pyx_v_self->sock != Py_None);
__pyx_t_4 = (__pyx_t_6 != 0);
__pyx_t_5 = __pyx_t_4;
__pyx_L4_bool_binop_done:;
__pyx_v_use_setstate = __pyx_t_5;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.sock is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
* else:
*/
__pyx_t_5 = (__pyx_v_use_setstate != 0);
if (__pyx_t_5) {
/* "(tree fragment)":13
* use_setstate = self.buffer is not None or self.sock is not None
* if use_setstate:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_pyx_unpickle_BufferedSocketRea); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_251251440);
__Pyx_GIVEREF(__pyx_int_251251440);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_251251440);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.sock is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, None), state
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_BufferedSocketRea); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_251251440);
__Pyx_GIVEREF(__pyx_int_251251440);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_251251440);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_BufferedSocketReader, (type(self), 0xef9caf0, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.BufferedSocketReader.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":197
* cdef object read_block
*
* def __init__(self, read_block, bufsize): # <<<<<<<<<<<<<<
* self.read_block = read_block
* super(CompressedBufferedReader, self).__init__(bufsize)
*/
/* Python wrapper */
static int __pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_read_block = 0;
PyObject *__pyx_v_bufsize = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_read_block,&__pyx_n_s_bufsize,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_read_block)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bufsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 197, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 197, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_read_block = values[0];
__pyx_v_bufsize = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 197, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader___init__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self), __pyx_v_read_block, __pyx_v_bufsize);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader___init__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v_read_block, PyObject *__pyx_v_bufsize) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__init__", 0);
/* "clickhouse_driver/bufferedreader.pyx":198
*
* def __init__(self, read_block, bufsize):
* self.read_block = read_block # <<<<<<<<<<<<<<
* super(CompressedBufferedReader, self).__init__(bufsize)
*
*/
__Pyx_INCREF(__pyx_v_read_block);
__Pyx_GIVEREF(__pyx_v_read_block);
__Pyx_GOTREF(__pyx_v_self->read_block);
__Pyx_DECREF(__pyx_v_self->read_block);
__pyx_v_self->read_block = __pyx_v_read_block;
/* "clickhouse_driver/bufferedreader.pyx":199
* def __init__(self, read_block, bufsize):
* self.read_block = read_block
* super(CompressedBufferedReader, self).__init__(bufsize) # <<<<<<<<<<<<<<
*
* def read_into_buffer(self):
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader));
__Pyx_GIVEREF(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader));
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_bufsize) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_bufsize);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":197
* cdef object read_block
*
* def __init__(self, read_block, bufsize): # <<<<<<<<<<<<<<
* self.read_block = read_block
* super(CompressedBufferedReader, self).__init__(bufsize)
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "clickhouse_driver/bufferedreader.pyx":201
* super(CompressedBufferedReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.buffer = bytearray(self.read_block())
* self.current_buffer_size = len(self.buffer)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_3read_into_buffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("read_into_buffer (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_2read_into_buffer(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_2read_into_buffer(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
__Pyx_RefNannySetupContext("read_into_buffer", 0);
/* "clickhouse_driver/bufferedreader.pyx":202
*
* def read_into_buffer(self):
* self.buffer = bytearray(self.read_block()) # <<<<<<<<<<<<<<
* self.current_buffer_size = len(self.buffer)
*
*/
__Pyx_INCREF(__pyx_v_self->read_block);
__pyx_t_2 = __pyx_v_self->read_block; __pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyByteArray_Type)), __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_self->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v_self->__pyx_base.buffer);
__pyx_v_self->__pyx_base.buffer = ((PyObject*)__pyx_t_2);
__pyx_t_2 = 0;
/* "clickhouse_driver/bufferedreader.pyx":203
* def read_into_buffer(self):
* self.buffer = bytearray(self.read_block())
* self.current_buffer_size = len(self.buffer) # <<<<<<<<<<<<<<
*
* if self.current_buffer_size == 0:
*/
__pyx_t_2 = __pyx_v_self->__pyx_base.buffer;
__Pyx_INCREF(__pyx_t_2);
if (unlikely(__pyx_t_2 == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 203, __pyx_L1_error)
}
__pyx_t_4 = PyByteArray_GET_SIZE(__pyx_t_2); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 203, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_self->__pyx_base.current_buffer_size = __pyx_t_4;
/* "clickhouse_driver/bufferedreader.pyx":205
* self.current_buffer_size = len(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*/
__pyx_t_5 = ((__pyx_v_self->__pyx_base.current_buffer_size == 0) != 0);
if (unlikely(__pyx_t_5)) {
/* "clickhouse_driver/bufferedreader.pyx":206
*
* if self.current_buffer_size == 0:
* raise EOFError('Unexpected EOF while reading bytes') # <<<<<<<<<<<<<<
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_EOFError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 206, __pyx_L1_error)
/* "clickhouse_driver/bufferedreader.pyx":205
* self.current_buffer_size = len(self.buffer)
*
* if self.current_buffer_size == 0: # <<<<<<<<<<<<<<
* raise EOFError('Unexpected EOF while reading bytes')
*/
}
/* "clickhouse_driver/bufferedreader.pyx":201
* super(CompressedBufferedReader, self).__init__(bufsize)
*
* def read_into_buffer(self): # <<<<<<<<<<<<<<
* self.buffer = bytearray(self.read_block())
* self.current_buffer_size = len(self.buffer)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.read_into_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_4__reduce_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_4__reduce_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->__pyx_base.current_buffer_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->__pyx_base.position); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_v_self->__pyx_base.buffer);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->__pyx_base.buffer);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_INCREF(__pyx_v_self->read_block);
__Pyx_GIVEREF(__pyx_v_self->read_block);
PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_self->read_block);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_3 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v__dict = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_4 = (__pyx_v__dict != Py_None);
__pyx_t_5 = (__pyx_t_4 != 0);
if (__pyx_t_5) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v__dict);
__pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.buffer is not None or self.read_block is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.buffer, self.current_buffer_size, self.position, self.read_block)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.buffer is not None or self.read_block is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
*/
/*else*/ {
__pyx_t_4 = (__pyx_v_self->__pyx_base.buffer != ((PyObject*)Py_None));
__pyx_t_6 = (__pyx_t_4 != 0);
if (!__pyx_t_6) {
} else {
__pyx_t_5 = __pyx_t_6;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_6 = (__pyx_v_self->read_block != Py_None);
__pyx_t_4 = (__pyx_t_6 != 0);
__pyx_t_5 = __pyx_t_4;
__pyx_L4_bool_binop_done:;
__pyx_v_use_setstate = __pyx_t_5;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.read_block is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
* else:
*/
__pyx_t_5 = (__pyx_v_use_setstate != 0);
if (__pyx_t_5) {
/* "(tree fragment)":13
* use_setstate = self.buffer is not None or self.read_block is not None
* if use_setstate:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_pyx_unpickle_CompressedBuffere); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_25411819);
__Pyx_GIVEREF(__pyx_int_25411819);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_25411819);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.buffer is not None or self.read_block is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, None), state
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_CompressedBuffere); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_25411819);
__Pyx_GIVEREF(__pyx_int_25411819);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_25411819);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_6__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_6__setstate_cython__(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_CompressedBufferedReader, (type(self), 0x183c0eb, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.CompressedBufferedReader.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader = {"__pyx_unpickle_BufferedReader", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedReader (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedReader", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedReader", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_BufferedReader") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedReader", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedReader", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x2a8a945: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0x2a8a945) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0x2a8a945:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0x2a8a945:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x2a, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x2a8a945: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x2a8a945 = (buffer, current_buffer_size, position))" % __pyx_checksum)
* __pyx_result = BufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[3])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->buffer);
__Pyx_DECREF(__pyx_v___pyx_result->buffer);
__pyx_v___pyx_result->buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->position = __pyx_t_2;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[3])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_2 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_2 > 3) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_3 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_3 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[3]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[3])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedSocketReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader = {"__pyx_unpickle_BufferedSocketReader", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedSocketReader", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedSocketReader", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_BufferedSocketReader") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_BufferedSocketReader", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_2__pyx_unpickle_BufferedSocketReader(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_2__pyx_unpickle_BufferedSocketReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xef9caf0: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xef9caf0) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xef9caf0:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xef9caf0:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xef, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xef9caf0: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xef9caf0 = (buffer, current_buffer_size, position, sock))" % __pyx_checksum)
* __pyx_result = BufferedSocketReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedSocketReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v___pyx_result->__pyx_base.buffer);
__pyx_v___pyx_result->__pyx_base.buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.position = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->sock);
__Pyx_DECREF(__pyx_v___pyx_result->sock);
__pyx_v___pyx_result->sock = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_2 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_2 > 4) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_3 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_3 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_CompressedBufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader = {"__pyx_unpickle_CompressedBufferedReader", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_CompressedBufferedReader (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressedBufferedReader", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressedBufferedReader", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_CompressedBufferedReader") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressedBufferedReader", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_CompressedBufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_4__pyx_unpickle_CompressedBufferedReader(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedreader_4__pyx_unpickle_CompressedBufferedReader(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__pyx_unpickle_CompressedBufferedReader", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x183c0eb: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0x183c0eb) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0x183c0eb:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0x183c0eb:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x18, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x183c0eb: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x183c0eb = (buffer, current_buffer_size, position, read_block))" % __pyx_checksum)
* __pyx_result = CompressedBufferedReader.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_CompressedBufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_CompressedBufferedReader", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_CompressedBufferedReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_CompressedBufferedReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v___pyx_result->__pyx_base.buffer);
__pyx_v___pyx_result->__pyx_base.buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.position = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->read_block);
__Pyx_DECREF(__pyx_v___pyx_result->read_block);
__pyx_v___pyx_result->read_block = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_2 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_2 > 4) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_3 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_3 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_CompressedBufferedReader__set_state(<CompressedBufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_CompressedBufferedReader__set_state(CompressedBufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.read_block = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_CompressedBufferedReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)o);
p->buffer = ((PyObject*)Py_None); Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader(PyObject *o) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
Py_CLEAR(p->buffer);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_1__get__(o);
}
static int __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
if (v) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_8position_3__set__(o, v);
}
else {
PyErr_SetString(PyExc_NotImplementedError, "__del__");
return -1;
}
}
static PyObject *__pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_1__get__(o);
}
static int __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
if (v) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_19current_buffer_size_3__set__(o, v);
}
else {
PyErr_SetString(PyExc_NotImplementedError, "__del__");
return -1;
}
}
static PyObject *__pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_1__get__(o);
}
static int __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
if (v) {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_3__set__(o, v);
}
else {
return __pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_6buffer_5__del__(o);
}
}
static PyMethodDef __pyx_methods_17clickhouse_driver_14bufferedreader_BufferedReader[] = {
{"read_into_buffer", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_3read_into_buffer, METH_NOARGS, 0},
{"read", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_5read, METH_O, 0},
{"read_one", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_7read_one, METH_NOARGS, 0},
{"read_strings", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_9read_strings, METH_VARARGS|METH_KEYWORDS, __pyx_doc_17clickhouse_driver_14bufferedreader_14BufferedReader_8read_strings},
{"__reduce_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_11__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_13__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_17clickhouse_driver_14bufferedreader_BufferedReader[] = {
{(char *)"position", __pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position, __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_position, (char *)0, 0},
{(char *)"current_buffer_size", __pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size, __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_current_buffer_size, (char *)0, 0},
{(char *)"buffer", __pyx_getprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer, __pyx_setprop_17clickhouse_driver_14bufferedreader_14BufferedReader_buffer, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader = {
PyVarObject_HEAD_INIT(0, 0)
"clickhouse_driver.bufferedreader.BufferedReader", /*tp_name*/
sizeof(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedReader), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_17clickhouse_driver_14bufferedreader_14BufferedReader_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p;
PyObject *o = __pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o);
p->sock = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyObject *o) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->sock);
#if CYTHON_USE_TYPE_SLOTS
if (PyType_IS_GC(Py_TYPE(o)->tp_base))
#endif
PyObject_GC_Track(o);
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader(o);
}
static int __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o;
e = ((likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) ? ((__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse) ? __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_BufferedSocketReader)); if (e) return e;
if (p->sock) {
e = (*v)(p->sock, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_17clickhouse_driver_14bufferedreader_BufferedSocketReader(PyObject *o) {
PyObject* tmp;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)o;
if (likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) { if (__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear) __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear(o); } else __Pyx_call_next_tp_clear(o, __pyx_tp_clear_17clickhouse_driver_14bufferedreader_BufferedSocketReader);
tmp = ((PyObject*)p->sock);
p->sock = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_17clickhouse_driver_14bufferedreader_BufferedSocketReader[] = {
{"read_into_buffer", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_3read_into_buffer, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_5__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader = {
PyVarObject_HEAD_INIT(0, 0)
"clickhouse_driver.bufferedreader.BufferedSocketReader", /*tp_name*/
sizeof(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_traverse*/
__pyx_tp_clear_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedSocketReader, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyObject *__pyx_tp_new_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p;
PyObject *o = __pyx_tp_new_17clickhouse_driver_14bufferedreader_BufferedReader(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o);
p->read_block = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyObject *o) {
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->read_block);
#if CYTHON_USE_TYPE_SLOTS
if (PyType_IS_GC(Py_TYPE(o)->tp_base))
#endif
PyObject_GC_Track(o);
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_BufferedReader(o);
}
static int __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o;
e = ((likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) ? ((__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse) ? __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, __pyx_tp_traverse_17clickhouse_driver_14bufferedreader_CompressedBufferedReader)); if (e) return e;
if (p->read_block) {
e = (*v)(p->read_block, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_17clickhouse_driver_14bufferedreader_CompressedBufferedReader(PyObject *o) {
PyObject* tmp;
struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *p = (struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader *)o;
if (likely(__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader)) { if (__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear) __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader->tp_clear(o); } else __Pyx_call_next_tp_clear(o, __pyx_tp_clear_17clickhouse_driver_14bufferedreader_CompressedBufferedReader);
tmp = ((PyObject*)p->read_block);
p->read_block = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_17clickhouse_driver_14bufferedreader_CompressedBufferedReader[] = {
{"read_into_buffer", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_3read_into_buffer, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_5__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_7__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader = {
PyVarObject_HEAD_INIT(0, 0)
"clickhouse_driver.bufferedreader.CompressedBufferedReader", /*tp_name*/
sizeof(struct __pyx_obj_17clickhouse_driver_14bufferedreader_CompressedBufferedReader), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_traverse*/
__pyx_tp_clear_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_17clickhouse_driver_14bufferedreader_24CompressedBufferedReader_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_17clickhouse_driver_14bufferedreader_CompressedBufferedReader, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_bufferedreader(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_bufferedreader},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"bufferedreader",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_BufferedReader, __pyx_k_BufferedReader, sizeof(__pyx_k_BufferedReader), 0, 0, 1, 1},
{&__pyx_n_s_BufferedSocketReader, __pyx_k_BufferedSocketReader, sizeof(__pyx_k_BufferedSocketReader), 0, 0, 1, 1},
{&__pyx_n_s_CompressedBufferedReader, __pyx_k_CompressedBufferedReader, sizeof(__pyx_k_CompressedBufferedReader), 0, 0, 1, 1},
{&__pyx_n_s_EOFError, __pyx_k_EOFError, sizeof(__pyx_k_EOFError), 0, 0, 1, 1},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0x18, __pyx_k_Incompatible_checksums_s_vs_0x18, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x18), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0x2a, __pyx_k_Incompatible_checksums_s_vs_0x2a, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x2a), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xef, __pyx_k_Incompatible_checksums_s_vs_0xef, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xef), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_kp_u_Unexpected_EOF_while_reading_byt, __pyx_k_Unexpected_EOF_while_reading_byt, sizeof(__pyx_k_Unexpected_EOF_while_reading_byt), 0, 1, 0, 0},
{&__pyx_n_s_UnicodeDecodeError, __pyx_k_UnicodeDecodeError, sizeof(__pyx_k_UnicodeDecodeError), 0, 0, 1, 1},
{&__pyx_n_s_bufsize, __pyx_k_bufsize, sizeof(__pyx_k_bufsize), 0, 0, 1, 1},
{&__pyx_n_s_clickhouse_driver_bufferedreader, __pyx_k_clickhouse_driver_bufferedreader, sizeof(__pyx_k_clickhouse_driver_bufferedreader), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_encoding, __pyx_k_encoding, sizeof(__pyx_k_encoding), 0, 0, 1, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_n_items, __pyx_k_n_items, sizeof(__pyx_k_n_items), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_BufferedReader, __pyx_k_pyx_unpickle_BufferedReader, sizeof(__pyx_k_pyx_unpickle_BufferedReader), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_BufferedSocketRea, __pyx_k_pyx_unpickle_BufferedSocketRea, sizeof(__pyx_k_pyx_unpickle_BufferedSocketRea), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_CompressedBuffere, __pyx_k_pyx_unpickle_CompressedBuffere, sizeof(__pyx_k_pyx_unpickle_CompressedBuffere), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_read_block, __pyx_k_read_block, sizeof(__pyx_k_read_block), 0, 0, 1, 1},
{&__pyx_n_s_read_into_buffer, __pyx_k_read_into_buffer, sizeof(__pyx_k_read_into_buffer), 0, 0, 1, 1},
{&__pyx_n_s_recv_into, __pyx_k_recv_into, sizeof(__pyx_k_recv_into), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_sock, __pyx_k_sock, sizeof(__pyx_k_sock), 0, 0, 1, 1},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_super, __pyx_k_super, sizeof(__pyx_k_super), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_kp_u_utf_8, __pyx_k_utf_8, sizeof(__pyx_k_utf_8), 0, 1, 0, 0},
{0, 0, 0, 0, 0, 0, 0}
};
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_super = __Pyx_GetBuiltinName(__pyx_n_s_super); if (!__pyx_builtin_super) __PYX_ERR(0, 20, __pyx_L1_error)
__pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 85, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 90, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 117, __pyx_L1_error)
__pyx_builtin_UnicodeDecodeError = __Pyx_GetBuiltinName(__pyx_n_s_UnicodeDecodeError); if (!__pyx_builtin_UnicodeDecodeError) __PYX_ERR(0, 168, __pyx_L1_error)
__pyx_builtin_EOFError = __Pyx_GetBuiltinName(__pyx_n_s_EOFError); if (!__pyx_builtin_EOFError) __PYX_ERR(0, 191, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "clickhouse_driver/bufferedreader.pyx":191
*
* if self.current_buffer_size == 0:
* raise EOFError('Unexpected EOF while reading bytes') # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Unexpected_EOF_while_reading_byt); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 191, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__2 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
__pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_BufferedReader, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(1, 1, __pyx_L1_error)
__pyx_tuple__4 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
__pyx_codeobj__5 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__4, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_BufferedSocketRea, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__5)) __PYX_ERR(1, 1, __pyx_L1_error)
__pyx_tuple__6 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
__pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_CompressedBuffere, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_25411819 = PyInt_FromLong(25411819L); if (unlikely(!__pyx_int_25411819)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_44607813 = PyInt_FromLong(44607813L); if (unlikely(!__pyx_int_44607813)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_251251440 = PyInt_FromLong(251251440L); if (unlikely(!__pyx_int_251251440)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
if (PyType_Ready(&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_dictoffset && __pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_BufferedReader, (PyObject *)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader = &__pyx_type_17clickhouse_driver_14bufferedreader_BufferedReader;
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_base = __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader;
if (PyType_Ready(&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader) < 0) __PYX_ERR(0, 180, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_dictoffset && __pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_BufferedSocketReader, (PyObject *)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader) < 0) __PYX_ERR(0, 180, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader) < 0) __PYX_ERR(0, 180, __pyx_L1_error)
__pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedSocketReader = &__pyx_type_17clickhouse_driver_14bufferedreader_BufferedSocketReader;
__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_base = __pyx_ptype_17clickhouse_driver_14bufferedreader_BufferedReader;
if (PyType_Ready(&__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader) < 0) __PYX_ERR(0, 194, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_dictoffset && __pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_CompressedBufferedReader, (PyObject *)&__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader) < 0) __PYX_ERR(0, 194, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader) < 0) __PYX_ERR(0, 194, __pyx_L1_error)
__pyx_ptype_17clickhouse_driver_14bufferedreader_CompressedBufferedReader = &__pyx_type_17clickhouse_driver_14bufferedreader_CompressedBufferedReader;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(3, 8, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(4, 15, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initbufferedreader(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initbufferedreader(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_bufferedreader(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_bufferedreader(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_bufferedreader(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'bufferedreader' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_bufferedreader(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("bufferedreader", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_clickhouse_driver__bufferedreader) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "clickhouse_driver.bufferedreader")) {
if (unlikely(PyDict_SetItemString(modules, "clickhouse_driver.bufferedreader", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error;
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error;
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error;
if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "(tree fragment)":1
* def __pyx_unpickle_BufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_17clickhouse_driver_14bufferedreader_1__pyx_unpickle_BufferedReader, NULL, __pyx_n_s_clickhouse_driver_bufferedreader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_BufferedReader, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_BufferedReader__set_state(<BufferedReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedReader__set_state(BufferedReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]
* if len(__pyx_state) > 3 and hasattr(__pyx_result, '__dict__'):
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_17clickhouse_driver_14bufferedreader_3__pyx_unpickle_BufferedSocketReader, NULL, __pyx_n_s_clickhouse_driver_bufferedreader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_BufferedSocketRea, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":1
* def __pyx_unpickle_CompressedBufferedReader(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_17clickhouse_driver_14bufferedreader_5__pyx_unpickle_CompressedBufferedReader, NULL, __pyx_n_s_clickhouse_driver_bufferedreader); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_CompressedBuffere, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "clickhouse_driver/bufferedreader.pyx":1
* from cpython cimport Py_INCREF, PyBytes_FromStringAndSize # <<<<<<<<<<<<<<
* from cpython.bytearray cimport PyByteArray_AsString
* # Using python's versions of pure c memory management functions for
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init clickhouse_driver.bufferedreader", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init clickhouse_driver.bufferedreader");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments .*/
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
if (likely(PyCFunction_Check(func)))
#endif
{
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* GetItemIntByteArray */
static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i,
int wraparound, int boundscheck) {
Py_ssize_t length;
if (wraparound | boundscheck) {
length = PyByteArray_GET_SIZE(string);
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
} else {
PyErr_SetString(PyExc_IndexError, "bytearray index out of range");
return -1;
}
} else {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
}
}
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return PyUnicode_FromUnicode(NULL, 0);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* CallNextTpTraverse */
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse) {
PyTypeObject* type = Py_TYPE(obj);
while (type && type->tp_traverse != current_tp_traverse)
type = type->tp_base;
while (type && type->tp_traverse == current_tp_traverse)
type = type->tp_base;
if (type && type->tp_traverse)
return type->tp_traverse(obj, v, a);
return 0;
}
/* CallNextTpClear */
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear) {
PyTypeObject* type = Py_TYPE(obj);
while (type && type->tp_clear != current_tp_clear)
type = type->tp_base;
while (type && type->tp_clear == current_tp_clear)
type = type->tp_base;
if (type && type->tp_clear)
type->tp_clear(obj);
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value) {
const unsigned char neg_one = (unsigned char) ((unsigned char) 0 - (unsigned char) 1), const_zero = (unsigned char) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(unsigned char) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(unsigned char) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(unsigned char) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(unsigned char),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4403_1 |
crossvul-cpp_data_good_4658_0 | /*
* eap.c - Extensible Authentication Protocol for PPP (RFC 2284)
*
* Copyright (c) 2001 by Sun Microsystems, Inc.
* All rights reserved.
*
* Non-exclusive rights to redistribute, modify, translate, and use
* this software in source and binary forms, in whole or in part, is
* hereby granted, provided that the above copyright notice is
* duplicated in any source form, and that neither the name of the
* copyright holder nor the author is used to endorse or promote
* products derived from this software.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Original version by James Carlson
*
* This implementation of EAP supports MD5-Challenge and SRP-SHA1
* authentication styles. Note that support of MD5-Challenge is a
* requirement of RFC 2284, and that it's essentially just a
* reimplementation of regular RFC 1994 CHAP using EAP messages.
*
* As an authenticator ("server"), there are multiple phases for each
* style. In the first phase of each style, the unauthenticated peer
* name is queried using the EAP Identity request type. If the
* "remotename" option is used, then this phase is skipped, because
* the peer's name is presumed to be known.
*
* For MD5-Challenge, there are two phases, and the second phase
* consists of sending the challenge itself and handling the
* associated response.
*
* For SRP-SHA1, there are four phases. The second sends 's', 'N',
* and 'g'. The reply contains 'A'. The third sends 'B', and the
* reply contains 'M1'. The forth sends the 'M2' value.
*
* As an authenticatee ("client"), there's just a single phase --
* responding to the queries generated by the peer. EAP is an
* authenticator-driven protocol.
*
* Based on draft-ietf-pppext-eap-srp-03.txt.
*/
#define RCSID "$Id: eap.c,v 1.4 2004/11/09 22:39:25 paulus Exp $"
/*
* TODO:
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pwd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <assert.h>
#include <errno.h>
#include "pppd.h"
#include "pathnames.h"
#include "md5.h"
#include "eap.h"
#ifdef USE_SRP
#include <t_pwd.h>
#include <t_server.h>
#include <t_client.h>
#include "pppcrypt.h"
#endif /* USE_SRP */
#ifndef SHA_DIGESTSIZE
#define SHA_DIGESTSIZE 20
#endif
eap_state eap_states[NUM_PPP]; /* EAP state; one for each unit */
#ifdef USE_SRP
static char *pn_secret = NULL; /* Pseudonym generating secret */
#endif
/*
* Command-line options.
*/
static option_t eap_option_list[] = {
{ "eap-restart", o_int, &eap_states[0].es_server.ea_timeout,
"Set retransmit timeout for EAP Requests (server)" },
{ "eap-max-sreq", o_int, &eap_states[0].es_server.ea_maxrequests,
"Set max number of EAP Requests sent (server)" },
{ "eap-timeout", o_int, &eap_states[0].es_client.ea_timeout,
"Set time limit for peer EAP authentication" },
{ "eap-max-rreq", o_int, &eap_states[0].es_client.ea_maxrequests,
"Set max number of EAP Requests allows (client)" },
{ "eap-interval", o_int, &eap_states[0].es_rechallenge,
"Set interval for EAP rechallenge" },
#ifdef USE_SRP
{ "srp-interval", o_int, &eap_states[0].es_lwrechallenge,
"Set interval for SRP lightweight rechallenge" },
{ "srp-pn-secret", o_string, &pn_secret,
"Long term pseudonym generation secret" },
{ "srp-use-pseudonym", o_bool, &eap_states[0].es_usepseudo,
"Use pseudonym if offered one by server", 1 },
#endif
{ NULL }
};
/*
* Protocol entry points.
*/
static void eap_init __P((int unit));
static void eap_input __P((int unit, u_char *inp, int inlen));
static void eap_protrej __P((int unit));
static void eap_lowerup __P((int unit));
static void eap_lowerdown __P((int unit));
static int eap_printpkt __P((u_char *inp, int inlen,
void (*)(void *arg, char *fmt, ...), void *arg));
struct protent eap_protent = {
PPP_EAP, /* protocol number */
eap_init, /* initialization procedure */
eap_input, /* process a received packet */
eap_protrej, /* process a received protocol-reject */
eap_lowerup, /* lower layer has gone up */
eap_lowerdown, /* lower layer has gone down */
NULL, /* open the protocol */
NULL, /* close the protocol */
eap_printpkt, /* print a packet in readable form */
NULL, /* process a received data packet */
1, /* protocol enabled */
"EAP", /* text name of protocol */
NULL, /* text name of corresponding data protocol */
eap_option_list, /* list of command-line options */
NULL, /* check requested options; assign defaults */
NULL, /* configure interface for demand-dial */
NULL /* say whether to bring up link for this pkt */
};
/*
* A well-known 2048 bit modulus.
*/
static const u_char wkmodulus[] = {
0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B,
0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F,
0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07,
0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50,
0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED,
0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D,
0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D,
0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50,
0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0,
0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3,
0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8,
0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8,
0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA,
0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74,
0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7,
0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B,
0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16,
0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81,
0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A,
0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48,
0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D,
0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA,
0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78,
0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6,
0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29,
0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8,
0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82,
0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6,
0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4,
0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75,
0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2,
0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73
};
/* Local forward declarations. */
static void eap_server_timeout __P((void *arg));
/*
* Convert EAP state code to printable string for debug.
*/
static const char *
eap_state_name(esc)
enum eap_state_code esc;
{
static const char *state_names[] = { EAP_STATES };
return (state_names[(int)esc]);
}
/*
* eap_init - Initialize state for an EAP user. This is currently
* called once by main() during start-up.
*/
static void
eap_init(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
BZERO(esp, sizeof (*esp));
esp->es_unit = unit;
esp->es_server.ea_timeout = EAP_DEFTIMEOUT;
esp->es_server.ea_maxrequests = EAP_DEFTRANSMITS;
esp->es_server.ea_id = (u_char)(drand48() * 0x100);
esp->es_client.ea_timeout = EAP_DEFREQTIME;
esp->es_client.ea_maxrequests = EAP_DEFALLOWREQ;
}
/*
* eap_client_timeout - Give up waiting for the peer to send any
* Request messages.
*/
static void
eap_client_timeout(arg)
void *arg;
{
eap_state *esp = (eap_state *) arg;
if (!eap_client_active(esp))
return;
error("EAP: timeout waiting for Request from peer");
auth_withpeer_fail(esp->es_unit, PPP_EAP);
esp->es_client.ea_state = eapBadAuth;
}
/*
* eap_authwithpeer - Authenticate to our peer (behave as client).
*
* Start client state and wait for requests. This is called only
* after eap_lowerup.
*/
void
eap_authwithpeer(unit, localname)
int unit;
char *localname;
{
eap_state *esp = &eap_states[unit];
/* Save the peer name we're given */
esp->es_client.ea_name = localname;
esp->es_client.ea_namelen = strlen(localname);
esp->es_client.ea_state = eapListen;
/*
* Start a timer so that if the other end just goes
* silent, we don't sit here waiting forever.
*/
if (esp->es_client.ea_timeout > 0)
TIMEOUT(eap_client_timeout, (void *)esp,
esp->es_client.ea_timeout);
}
/*
* Format a standard EAP Failure message and send it to the peer.
* (Server operation)
*/
static void
eap_send_failure(esp)
eap_state *esp;
{
u_char *outp;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_FAILURE, outp);
esp->es_server.ea_id++;
PUTCHAR(esp->es_server.ea_id, outp);
PUTSHORT(EAP_HEADERLEN, outp);
output(esp->es_unit, outpacket_buf, EAP_HEADERLEN + PPP_HDRLEN);
esp->es_server.ea_state = eapBadAuth;
auth_peer_fail(esp->es_unit, PPP_EAP);
}
/*
* Format a standard EAP Success message and send it to the peer.
* (Server operation)
*/
static void
eap_send_success(esp)
eap_state *esp;
{
u_char *outp;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_SUCCESS, outp);
esp->es_server.ea_id++;
PUTCHAR(esp->es_server.ea_id, outp);
PUTSHORT(EAP_HEADERLEN, outp);
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + EAP_HEADERLEN);
auth_peer_success(esp->es_unit, PPP_EAP, 0,
esp->es_server.ea_peer, esp->es_server.ea_peerlen);
}
#ifdef USE_SRP
/*
* Set DES key according to pseudonym-generating secret and current
* date.
*/
static bool
pncrypt_setkey(int timeoffs)
{
struct tm *tp;
char tbuf[9];
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
time_t reftime;
if (pn_secret == NULL)
return (0);
reftime = time(NULL) + timeoffs;
tp = localtime(&reftime);
SHA1Init(&ctxt);
SHA1Update(&ctxt, pn_secret, strlen(pn_secret));
strftime(tbuf, sizeof (tbuf), "%Y%m%d", tp);
SHA1Update(&ctxt, tbuf, strlen(tbuf));
SHA1Final(dig, &ctxt);
return (DesSetkey(dig));
}
static char base64[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
struct b64state {
u_int32_t bs_bits;
int bs_offs;
};
static int
b64enc(bs, inp, inlen, outp)
struct b64state *bs;
u_char *inp;
int inlen;
u_char *outp;
{
int outlen = 0;
while (inlen > 0) {
bs->bs_bits = (bs->bs_bits << 8) | *inp++;
inlen--;
bs->bs_offs += 8;
if (bs->bs_offs >= 24) {
*outp++ = base64[(bs->bs_bits >> 18) & 0x3F];
*outp++ = base64[(bs->bs_bits >> 12) & 0x3F];
*outp++ = base64[(bs->bs_bits >> 6) & 0x3F];
*outp++ = base64[bs->bs_bits & 0x3F];
outlen += 4;
bs->bs_offs = 0;
bs->bs_bits = 0;
}
}
return (outlen);
}
static int
b64flush(bs, outp)
struct b64state *bs;
u_char *outp;
{
int outlen = 0;
if (bs->bs_offs == 8) {
*outp++ = base64[(bs->bs_bits >> 2) & 0x3F];
*outp++ = base64[(bs->bs_bits << 4) & 0x3F];
outlen = 2;
} else if (bs->bs_offs == 16) {
*outp++ = base64[(bs->bs_bits >> 10) & 0x3F];
*outp++ = base64[(bs->bs_bits >> 4) & 0x3F];
*outp++ = base64[(bs->bs_bits << 2) & 0x3F];
outlen = 3;
}
bs->bs_offs = 0;
bs->bs_bits = 0;
return (outlen);
}
static int
b64dec(bs, inp, inlen, outp)
struct b64state *bs;
u_char *inp;
int inlen;
u_char *outp;
{
int outlen = 0;
char *cp;
while (inlen > 0) {
if ((cp = strchr(base64, *inp++)) == NULL)
break;
bs->bs_bits = (bs->bs_bits << 6) | (cp - base64);
inlen--;
bs->bs_offs += 6;
if (bs->bs_offs >= 8) {
*outp++ = bs->bs_bits >> (bs->bs_offs - 8);
outlen++;
bs->bs_offs -= 8;
}
}
return (outlen);
}
#endif /* USE_SRP */
/*
* Assume that current waiting server state is complete and figure
* next state to use based on available authentication data. 'status'
* indicates if there was an error in handling the last query. It is
* 0 for success and non-zero for failure.
*/
static void
eap_figure_next_state(esp, status)
eap_state *esp;
int status;
{
#ifdef USE_SRP
unsigned char secbuf[MAXWORDLEN], clear[8], *sp, *dp;
struct t_pw tpw;
struct t_confent *tce, mytce;
char *cp, *cp2;
struct t_server *ts;
int id, i, plen, toffs;
u_char vals[2];
struct b64state bs;
#endif /* USE_SRP */
esp->es_server.ea_timeout = esp->es_savedtime;
switch (esp->es_server.ea_state) {
case eapBadAuth:
return;
case eapIdentify:
#ifdef USE_SRP
/* Discard any previous session. */
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status != 0) {
esp->es_server.ea_state = eapBadAuth;
break;
}
#ifdef USE_SRP
/* If we've got a pseudonym, try to decode to real name. */
if (esp->es_server.ea_peerlen > SRP_PSEUDO_LEN &&
strncmp(esp->es_server.ea_peer, SRP_PSEUDO_ID,
SRP_PSEUDO_LEN) == 0 &&
(esp->es_server.ea_peerlen - SRP_PSEUDO_LEN) * 3 / 4 <
sizeof (secbuf)) {
BZERO(&bs, sizeof (bs));
plen = b64dec(&bs,
esp->es_server.ea_peer + SRP_PSEUDO_LEN,
esp->es_server.ea_peerlen - SRP_PSEUDO_LEN,
secbuf);
toffs = 0;
for (i = 0; i < 5; i++) {
pncrypt_setkey(toffs);
toffs -= 86400;
if (!DesDecrypt(secbuf, clear)) {
dbglog("no DES here; cannot decode "
"pseudonym");
return;
}
id = *(unsigned char *)clear;
if (id + 1 <= plen && id + 9 > plen)
break;
}
if (plen % 8 == 0 && i < 5) {
/*
* Note that this is always shorter than the
* original stored string, so there's no need
* to realloc.
*/
if ((i = plen = *(unsigned char *)clear) > 7)
i = 7;
esp->es_server.ea_peerlen = plen;
dp = (unsigned char *)esp->es_server.ea_peer;
BCOPY(clear + 1, dp, i);
plen -= i;
dp += i;
sp = secbuf + 8;
while (plen > 0) {
(void) DesDecrypt(sp, dp);
sp += 8;
dp += 8;
plen -= 8;
}
esp->es_server.ea_peer[
esp->es_server.ea_peerlen] = '\0';
dbglog("decoded pseudonym to \"%.*q\"",
esp->es_server.ea_peerlen,
esp->es_server.ea_peer);
} else {
dbglog("failed to decode real name");
/* Stay in eapIdentfy state; requery */
break;
}
}
/* Look up user in secrets database. */
if (get_srp_secret(esp->es_unit, esp->es_server.ea_peer,
esp->es_server.ea_name, (char *)secbuf, 1) != 0) {
/* Set up default in case SRP entry is bad */
esp->es_server.ea_state = eapMD5Chall;
/* Get t_confent based on index in srp-secrets */
id = strtol((char *)secbuf, &cp, 10);
if (*cp++ != ':' || id < 0)
break;
if (id == 0) {
mytce.index = 0;
mytce.modulus.data = (u_char *)wkmodulus;
mytce.modulus.len = sizeof (wkmodulus);
mytce.generator.data = (u_char *)"\002";
mytce.generator.len = 1;
tce = &mytce;
} else if ((tce = gettcid(id)) != NULL) {
/*
* Client will have to verify this modulus/
* generator combination, and that will take
* a while. Lengthen the timeout here.
*/
if (esp->es_server.ea_timeout > 0 &&
esp->es_server.ea_timeout < 30)
esp->es_server.ea_timeout = 30;
} else {
break;
}
if ((cp2 = strchr(cp, ':')) == NULL)
break;
*cp2++ = '\0';
tpw.pebuf.name = esp->es_server.ea_peer;
tpw.pebuf.password.len = t_fromb64((char *)tpw.pwbuf,
cp);
tpw.pebuf.password.data = tpw.pwbuf;
tpw.pebuf.salt.len = t_fromb64((char *)tpw.saltbuf,
cp2);
tpw.pebuf.salt.data = tpw.saltbuf;
if ((ts = t_serveropenraw(&tpw.pebuf, tce)) == NULL)
break;
esp->es_server.ea_session = (void *)ts;
esp->es_server.ea_state = eapSRP1;
vals[0] = esp->es_server.ea_id + 1;
vals[1] = EAPT_SRP;
t_serveraddexdata(ts, vals, 2);
/* Generate B; must call before t_servergetkey() */
t_servergenexp(ts);
break;
}
#endif /* USE_SRP */
esp->es_server.ea_state = eapMD5Chall;
break;
case eapSRP1:
#ifdef USE_SRP
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL && status != 0) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status == 1) {
esp->es_server.ea_state = eapMD5Chall;
} else if (status != 0 || esp->es_server.ea_session == NULL) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapSRP2;
}
break;
case eapSRP2:
#ifdef USE_SRP
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL && status != 0) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status != 0 || esp->es_server.ea_session == NULL) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapSRP3;
}
break;
case eapSRP3:
case eapSRP4:
#ifdef USE_SRP
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL && status != 0) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status != 0 || esp->es_server.ea_session == NULL) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapOpen;
}
break;
case eapMD5Chall:
if (status != 0) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapOpen;
}
break;
default:
esp->es_server.ea_state = eapBadAuth;
break;
}
if (esp->es_server.ea_state == eapBadAuth)
eap_send_failure(esp);
}
/*
* Format an EAP Request message and send it to the peer. Message
* type depends on current state. (Server operation)
*/
static void
eap_send_request(esp)
eap_state *esp;
{
u_char *outp;
u_char *lenloc;
u_char *ptr;
int outlen;
int challen;
char *str;
#ifdef USE_SRP
struct t_server *ts;
u_char clear[8], cipher[8], dig[SHA_DIGESTSIZE], *optr, *cp;
int i, j;
struct b64state b64;
SHA1_CTX ctxt;
#endif /* USE_SRP */
/* Handle both initial auth and restart */
if (esp->es_server.ea_state < eapIdentify &&
esp->es_server.ea_state != eapInitial) {
esp->es_server.ea_state = eapIdentify;
if (explicit_remote) {
/*
* If we already know the peer's
* unauthenticated name, then there's no
* reason to ask. Go to next state instead.
*/
esp->es_server.ea_peer = remote_name;
esp->es_server.ea_peerlen = strlen(remote_name);
eap_figure_next_state(esp, 0);
}
}
if (esp->es_server.ea_maxrequests > 0 &&
esp->es_server.ea_requests >= esp->es_server.ea_maxrequests) {
if (esp->es_server.ea_responses > 0)
error("EAP: too many Requests sent");
else
error("EAP: no response to Requests");
eap_send_failure(esp);
return;
}
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_REQUEST, outp);
PUTCHAR(esp->es_server.ea_id, outp);
lenloc = outp;
INCPTR(2, outp);
switch (esp->es_server.ea_state) {
case eapIdentify:
PUTCHAR(EAPT_IDENTITY, outp);
str = "Name";
challen = strlen(str);
BCOPY(str, outp, challen);
INCPTR(challen, outp);
break;
case eapMD5Chall:
PUTCHAR(EAPT_MD5CHAP, outp);
/*
* pick a random challenge length between
* MIN_CHALLENGE_LENGTH and MAX_CHALLENGE_LENGTH
*/
challen = (drand48() *
(MAX_CHALLENGE_LENGTH - MIN_CHALLENGE_LENGTH)) +
MIN_CHALLENGE_LENGTH;
PUTCHAR(challen, outp);
esp->es_challen = challen;
ptr = esp->es_challenge;
while (--challen >= 0)
*ptr++ = (u_char) (drand48() * 0x100);
BCOPY(esp->es_challenge, outp, esp->es_challen);
INCPTR(esp->es_challen, outp);
BCOPY(esp->es_server.ea_name, outp, esp->es_server.ea_namelen);
INCPTR(esp->es_server.ea_namelen, outp);
break;
#ifdef USE_SRP
case eapSRP1:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_CHALLENGE, outp);
PUTCHAR(esp->es_server.ea_namelen, outp);
BCOPY(esp->es_server.ea_name, outp, esp->es_server.ea_namelen);
INCPTR(esp->es_server.ea_namelen, outp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
PUTCHAR(ts->s.len, outp);
BCOPY(ts->s.data, outp, ts->s.len);
INCPTR(ts->s.len, outp);
if (ts->g.len == 1 && ts->g.data[0] == 2) {
PUTCHAR(0, outp);
} else {
PUTCHAR(ts->g.len, outp);
BCOPY(ts->g.data, outp, ts->g.len);
INCPTR(ts->g.len, outp);
}
if (ts->n.len != sizeof (wkmodulus) ||
BCMP(ts->n.data, wkmodulus, sizeof (wkmodulus)) != 0) {
BCOPY(ts->n.data, outp, ts->n.len);
INCPTR(ts->n.len, outp);
}
break;
case eapSRP2:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_SKEY, outp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
BCOPY(ts->B.data, outp, ts->B.len);
INCPTR(ts->B.len, outp);
break;
case eapSRP3:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_SVALIDATOR, outp);
PUTLONG(SRPVAL_EBIT, outp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
BCOPY(t_serverresponse(ts), outp, SHA_DIGESTSIZE);
INCPTR(SHA_DIGESTSIZE, outp);
if (pncrypt_setkey(0)) {
/* Generate pseudonym */
optr = outp;
cp = (unsigned char *)esp->es_server.ea_peer;
if ((j = i = esp->es_server.ea_peerlen) > 7)
j = 7;
clear[0] = i;
BCOPY(cp, clear + 1, j);
i -= j;
cp += j;
if (!DesEncrypt(clear, cipher)) {
dbglog("no DES here; not generating pseudonym");
break;
}
BZERO(&b64, sizeof (b64));
outp++; /* space for pseudonym length */
outp += b64enc(&b64, cipher, 8, outp);
while (i >= 8) {
(void) DesEncrypt(cp, cipher);
outp += b64enc(&b64, cipher, 8, outp);
cp += 8;
i -= 8;
}
if (i > 0) {
BCOPY(cp, clear, i);
cp += i;
while (i < 8) {
*cp++ = drand48() * 0x100;
i++;
}
(void) DesEncrypt(clear, cipher);
outp += b64enc(&b64, cipher, 8, outp);
}
outp += b64flush(&b64, outp);
/* Set length and pad out to next 20 octet boundary */
i = outp - optr - 1;
*optr = i;
i %= SHA_DIGESTSIZE;
if (i != 0) {
while (i < SHA_DIGESTSIZE) {
*outp++ = drand48() * 0x100;
i++;
}
}
/* Obscure the pseudonym with SHA1 hash */
SHA1Init(&ctxt);
SHA1Update(&ctxt, &esp->es_server.ea_id, 1);
SHA1Update(&ctxt, esp->es_server.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, esp->es_server.ea_peer,
esp->es_server.ea_peerlen);
while (optr < outp) {
SHA1Final(dig, &ctxt);
cp = dig;
while (cp < dig + SHA_DIGESTSIZE)
*optr++ ^= *cp++;
SHA1Init(&ctxt);
SHA1Update(&ctxt, &esp->es_server.ea_id, 1);
SHA1Update(&ctxt, esp->es_server.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, optr - SHA_DIGESTSIZE,
SHA_DIGESTSIZE);
}
}
break;
case eapSRP4:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_LWRECHALLENGE, outp);
challen = MIN_CHALLENGE_LENGTH +
((MAX_CHALLENGE_LENGTH - MIN_CHALLENGE_LENGTH) * drand48());
esp->es_challen = challen;
ptr = esp->es_challenge;
while (--challen >= 0)
*ptr++ = drand48() * 0x100;
BCOPY(esp->es_challenge, outp, esp->es_challen);
INCPTR(esp->es_challen, outp);
break;
#endif /* USE_SRP */
default:
return;
}
outlen = (outp - outpacket_buf) - PPP_HDRLEN;
PUTSHORT(outlen, lenloc);
output(esp->es_unit, outpacket_buf, outlen + PPP_HDRLEN);
esp->es_server.ea_requests++;
if (esp->es_server.ea_timeout > 0)
TIMEOUT(eap_server_timeout, esp, esp->es_server.ea_timeout);
}
/*
* eap_authpeer - Authenticate our peer (behave as server).
*
* Start server state and send first request. This is called only
* after eap_lowerup.
*/
void
eap_authpeer(unit, localname)
int unit;
char *localname;
{
eap_state *esp = &eap_states[unit];
/* Save the name we're given. */
esp->es_server.ea_name = localname;
esp->es_server.ea_namelen = strlen(localname);
esp->es_savedtime = esp->es_server.ea_timeout;
/* Lower layer up yet? */
if (esp->es_server.ea_state == eapInitial ||
esp->es_server.ea_state == eapPending) {
esp->es_server.ea_state = eapPending;
return;
}
esp->es_server.ea_state = eapPending;
/* ID number not updated here intentionally; hashed into M1 */
eap_send_request(esp);
}
/*
* eap_server_timeout - Retransmission timer for sending Requests
* expired.
*/
static void
eap_server_timeout(arg)
void *arg;
{
eap_state *esp = (eap_state *) arg;
if (!eap_server_active(esp))
return;
/* EAP ID number must not change on timeout. */
eap_send_request(esp);
}
/*
* When it's time to send rechallenge the peer, this timeout is
* called. Once the rechallenge is successful, the response handler
* will restart the timer. If it fails, then the link is dropped.
*/
static void
eap_rechallenge(arg)
void *arg;
{
eap_state *esp = (eap_state *)arg;
if (esp->es_server.ea_state != eapOpen &&
esp->es_server.ea_state != eapSRP4)
return;
esp->es_server.ea_requests = 0;
esp->es_server.ea_state = eapIdentify;
eap_figure_next_state(esp, 0);
esp->es_server.ea_id++;
eap_send_request(esp);
}
static void
srp_lwrechallenge(arg)
void *arg;
{
eap_state *esp = (eap_state *)arg;
if (esp->es_server.ea_state != eapOpen ||
esp->es_server.ea_type != EAPT_SRP)
return;
esp->es_server.ea_requests = 0;
esp->es_server.ea_state = eapSRP4;
esp->es_server.ea_id++;
eap_send_request(esp);
}
/*
* eap_lowerup - The lower layer is now up.
*
* This is called before either eap_authpeer or eap_authwithpeer. See
* link_established() in auth.c. All that's necessary here is to
* return to closed state so that those two routines will do the right
* thing.
*/
static void
eap_lowerup(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
/* Discard any (possibly authenticated) peer name. */
if (esp->es_server.ea_peer != NULL &&
esp->es_server.ea_peer != remote_name)
free(esp->es_server.ea_peer);
esp->es_server.ea_peer = NULL;
if (esp->es_client.ea_peer != NULL)
free(esp->es_client.ea_peer);
esp->es_client.ea_peer = NULL;
esp->es_client.ea_state = eapClosed;
esp->es_server.ea_state = eapClosed;
}
/*
* eap_lowerdown - The lower layer is now down.
*
* Cancel all timeouts and return to initial state.
*/
static void
eap_lowerdown(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
if (eap_client_active(esp) && esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
if (eap_server_active(esp)) {
if (esp->es_server.ea_timeout > 0) {
UNTIMEOUT(eap_server_timeout, (void *)esp);
}
} else {
if ((esp->es_server.ea_state == eapOpen ||
esp->es_server.ea_state == eapSRP4) &&
esp->es_rechallenge > 0) {
UNTIMEOUT(eap_rechallenge, (void *)esp);
}
if (esp->es_server.ea_state == eapOpen &&
esp->es_lwrechallenge > 0) {
UNTIMEOUT(srp_lwrechallenge, (void *)esp);
}
}
esp->es_client.ea_state = esp->es_server.ea_state = eapInitial;
esp->es_client.ea_requests = esp->es_server.ea_requests = 0;
}
/*
* eap_protrej - Peer doesn't speak this protocol.
*
* This shouldn't happen. If it does, it represents authentication
* failure.
*/
static void
eap_protrej(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
if (eap_client_active(esp)) {
error("EAP authentication failed due to Protocol-Reject");
auth_withpeer_fail(unit, PPP_EAP);
}
if (eap_server_active(esp)) {
error("EAP authentication of peer failed on Protocol-Reject");
auth_peer_fail(unit, PPP_EAP);
}
eap_lowerdown(unit);
}
/*
* Format and send a regular EAP Response message.
*/
static void
eap_send_response(esp, id, typenum, str, lenstr)
eap_state *esp;
u_char id;
u_char typenum;
u_char *str;
int lenstr;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + sizeof (u_char) + lenstr;
PUTSHORT(msglen, outp);
PUTCHAR(typenum, outp);
if (lenstr > 0) {
BCOPY(str, outp, lenstr);
}
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
/*
* Format and send an MD5-Challenge EAP Response message.
*/
static void
eap_chap_response(esp, id, hash, name, namelen)
eap_state *esp;
u_char id;
u_char *hash;
char *name;
int namelen;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + MD5_SIGNATURE_SIZE +
namelen;
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_MD5CHAP, outp);
PUTCHAR(MD5_SIGNATURE_SIZE, outp);
BCOPY(hash, outp, MD5_SIGNATURE_SIZE);
INCPTR(MD5_SIGNATURE_SIZE, outp);
if (namelen > 0) {
BCOPY(name, outp, namelen);
}
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
#ifdef USE_SRP
/*
* Format and send a SRP EAP Response message.
*/
static void
eap_srp_response(esp, id, subtypenum, str, lenstr)
eap_state *esp;
u_char id;
u_char subtypenum;
u_char *str;
int lenstr;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + lenstr;
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(subtypenum, outp);
if (lenstr > 0) {
BCOPY(str, outp, lenstr);
}
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
/*
* Format and send a SRP EAP Client Validator Response message.
*/
static void
eap_srpval_response(esp, id, flags, str)
eap_state *esp;
u_char id;
u_int32_t flags;
u_char *str;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + sizeof (u_int32_t) +
SHA_DIGESTSIZE;
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_CVALIDATOR, outp);
PUTLONG(flags, outp);
BCOPY(str, outp, SHA_DIGESTSIZE);
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
#endif /* USE_SRP */
static void
eap_send_nak(esp, id, type)
eap_state *esp;
u_char id;
u_char type;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char);
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_NAK, outp);
PUTCHAR(type, outp);
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
#ifdef USE_SRP
static char *
name_of_pn_file()
{
char *user, *path, *file;
struct passwd *pw;
size_t pl;
static bool pnlogged = 0;
pw = getpwuid(getuid());
if (pw == NULL || (user = pw->pw_dir) == NULL || user[0] == 0) {
errno = EINVAL;
return (NULL);
}
file = _PATH_PSEUDONYM;
pl = strlen(user) + strlen(file) + 2;
path = malloc(pl);
if (path == NULL)
return (NULL);
(void) slprintf(path, pl, "%s/%s", user, file);
if (!pnlogged) {
dbglog("pseudonym file: %s", path);
pnlogged = 1;
}
return (path);
}
static int
open_pn_file(modebits)
mode_t modebits;
{
char *path;
int fd, err;
if ((path = name_of_pn_file()) == NULL)
return (-1);
fd = open(path, modebits, S_IRUSR | S_IWUSR);
err = errno;
free(path);
errno = err;
return (fd);
}
static void
remove_pn_file()
{
char *path;
if ((path = name_of_pn_file()) != NULL) {
(void) unlink(path);
(void) free(path);
}
}
static void
write_pseudonym(esp, inp, len, id)
eap_state *esp;
u_char *inp;
int len, id;
{
u_char val;
u_char *datp, *digp;
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
int dsize, fd, olen = len;
/*
* Do the decoding by working backwards. This eliminates the need
* to save the decoded output in a separate buffer.
*/
val = id;
while (len > 0) {
if ((dsize = len % SHA_DIGESTSIZE) == 0)
dsize = SHA_DIGESTSIZE;
len -= dsize;
datp = inp + len;
SHA1Init(&ctxt);
SHA1Update(&ctxt, &val, 1);
SHA1Update(&ctxt, esp->es_client.ea_skey, SESSION_KEY_LEN);
if (len > 0) {
SHA1Update(&ctxt, datp, SHA_DIGESTSIZE);
} else {
SHA1Update(&ctxt, esp->es_client.ea_name,
esp->es_client.ea_namelen);
}
SHA1Final(dig, &ctxt);
for (digp = dig; digp < dig + SHA_DIGESTSIZE; digp++)
*datp++ ^= *digp;
}
/* Now check that the result is sane */
if (olen <= 0 || *inp + 1 > olen) {
dbglog("EAP: decoded pseudonym is unusable <%.*B>", olen, inp);
return;
}
/* Save it away */
fd = open_pn_file(O_WRONLY | O_CREAT | O_TRUNC);
if (fd < 0) {
dbglog("EAP: error saving pseudonym: %m");
return;
}
len = write(fd, inp + 1, *inp);
if (close(fd) != -1 && len == *inp) {
dbglog("EAP: saved pseudonym");
esp->es_usedpseudo = 0;
} else {
dbglog("EAP: failed to save pseudonym");
remove_pn_file();
}
}
#endif /* USE_SRP */
/*
* eap_request - Receive EAP Request message (client mode).
*/
static void
eap_request(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
u_char typenum;
u_char vallen;
int secret_len;
char secret[MAXWORDLEN];
char rhostname[256];
MD5_CTX mdContext;
u_char hash[MD5_SIGNATURE_SIZE];
#ifdef USE_SRP
struct t_client *tc;
struct t_num sval, gval, Nval, *Ap, Bval;
u_char vals[2];
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
int fd;
#endif /* USE_SRP */
/*
* Note: we update es_client.ea_id *only if* a Response
* message is being generated. Otherwise, we leave it the
* same for duplicate detection purposes.
*/
esp->es_client.ea_requests++;
if (esp->es_client.ea_maxrequests != 0 &&
esp->es_client.ea_requests > esp->es_client.ea_maxrequests) {
info("EAP: received too many Request messages");
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
auth_withpeer_fail(esp->es_unit, PPP_EAP);
return;
}
if (len <= 0) {
error("EAP: empty Request message discarded");
return;
}
GETCHAR(typenum, inp);
len--;
switch (typenum) {
case EAPT_IDENTITY:
if (len > 0)
info("EAP: Identity prompt \"%.*q\"", len, inp);
#ifdef USE_SRP
if (esp->es_usepseudo &&
(esp->es_usedpseudo == 0 ||
(esp->es_usedpseudo == 1 &&
id == esp->es_client.ea_id))) {
esp->es_usedpseudo = 1;
/* Try to get a pseudonym */
if ((fd = open_pn_file(O_RDONLY)) >= 0) {
strcpy(rhostname, SRP_PSEUDO_ID);
len = read(fd, rhostname + SRP_PSEUDO_LEN,
sizeof (rhostname) - SRP_PSEUDO_LEN);
/* XXX NAI unsupported */
if (len > 0) {
eap_send_response(esp, id, typenum,
rhostname, len + SRP_PSEUDO_LEN);
}
(void) close(fd);
if (len > 0)
break;
}
}
/* Stop using pseudonym now. */
if (esp->es_usepseudo && esp->es_usedpseudo != 2) {
remove_pn_file();
esp->es_usedpseudo = 2;
}
#endif /* USE_SRP */
eap_send_response(esp, id, typenum, esp->es_client.ea_name,
esp->es_client.ea_namelen);
break;
case EAPT_NOTIFICATION:
if (len > 0)
info("EAP: Notification \"%.*q\"", len, inp);
eap_send_response(esp, id, typenum, NULL, 0);
break;
case EAPT_NAK:
/*
* Avoid the temptation to send Response Nak in reply
* to Request Nak here. It can only lead to trouble.
*/
warn("EAP: unexpected Nak in Request; ignored");
/* Return because we're waiting for something real. */
return;
case EAPT_MD5CHAP:
if (len < 1) {
error("EAP: received MD5-Challenge with no data");
/* Bogus request; wait for something real. */
return;
}
GETCHAR(vallen, inp);
len--;
if (vallen < 8 || vallen > len) {
error("EAP: MD5-Challenge with bad length %d (8..%d)",
vallen, len);
/* Try something better. */
eap_send_nak(esp, id, EAPT_SRP);
break;
}
/* Not so likely to happen. */
if (len - vallen >= sizeof (rhostname)) {
dbglog("EAP: trimming really long peer name down");
BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
rhostname[sizeof (rhostname) - 1] = '\0';
} else {
BCOPY(inp + vallen, rhostname, len - vallen);
rhostname[len - vallen] = '\0';
}
/* In case the remote doesn't give us his name. */
if (explicit_remote ||
(remote_name[0] != '\0' && vallen == len))
strlcpy(rhostname, remote_name, sizeof (rhostname));
/*
* Get the secret for authenticating ourselves with
* the specified host.
*/
if (!get_secret(esp->es_unit, esp->es_client.ea_name,
rhostname, secret, &secret_len, 0)) {
dbglog("EAP: no MD5 secret for auth to %q", rhostname);
eap_send_nak(esp, id, EAPT_SRP);
break;
}
MD5_Init(&mdContext);
typenum = id;
MD5_Update(&mdContext, &typenum, 1);
MD5_Update(&mdContext, (u_char *)secret, secret_len);
BZERO(secret, sizeof (secret));
MD5_Update(&mdContext, inp, vallen);
MD5_Final(hash, &mdContext);
eap_chap_response(esp, id, hash, esp->es_client.ea_name,
esp->es_client.ea_namelen);
break;
#ifdef USE_SRP
case EAPT_SRP:
if (len < 1) {
error("EAP: received empty SRP Request");
/* Bogus request; wait for something real. */
return;
}
/* Get subtype */
GETCHAR(vallen, inp);
len--;
switch (vallen) {
case EAPSRP_CHALLENGE:
tc = NULL;
if (esp->es_client.ea_session != NULL) {
tc = (struct t_client *)esp->es_client.
ea_session;
/*
* If this is a new challenge, then start
* over with a new client session context.
* Otherwise, just resend last response.
*/
if (id != esp->es_client.ea_id) {
t_clientclose(tc);
esp->es_client.ea_session = NULL;
tc = NULL;
}
}
/* No session key just yet */
esp->es_client.ea_skey = NULL;
if (tc == NULL) {
GETCHAR(vallen, inp);
len--;
if (vallen >= len) {
error("EAP: badly-formed SRP Challenge"
" (name)");
/* Ignore badly-formed messages */
return;
}
BCOPY(inp, rhostname, vallen);
rhostname[vallen] = '\0';
INCPTR(vallen, inp);
len -= vallen;
/*
* In case the remote doesn't give us his name,
* use configured name.
*/
if (explicit_remote ||
(remote_name[0] != '\0' && vallen == 0)) {
strlcpy(rhostname, remote_name,
sizeof (rhostname));
}
if (esp->es_client.ea_peer != NULL)
free(esp->es_client.ea_peer);
esp->es_client.ea_peer = strdup(rhostname);
esp->es_client.ea_peerlen = strlen(rhostname);
GETCHAR(vallen, inp);
len--;
if (vallen >= len) {
error("EAP: badly-formed SRP Challenge"
" (s)");
/* Ignore badly-formed messages */
return;
}
sval.data = inp;
sval.len = vallen;
INCPTR(vallen, inp);
len -= vallen;
GETCHAR(vallen, inp);
len--;
if (vallen > len) {
error("EAP: badly-formed SRP Challenge"
" (g)");
/* Ignore badly-formed messages */
return;
}
/* If no generator present, then use value 2 */
if (vallen == 0) {
gval.data = (u_char *)"\002";
gval.len = 1;
} else {
gval.data = inp;
gval.len = vallen;
}
INCPTR(vallen, inp);
len -= vallen;
/*
* If no modulus present, then use well-known
* value.
*/
if (len == 0) {
Nval.data = (u_char *)wkmodulus;
Nval.len = sizeof (wkmodulus);
} else {
Nval.data = inp;
Nval.len = len;
}
tc = t_clientopen(esp->es_client.ea_name,
&Nval, &gval, &sval);
if (tc == NULL) {
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
esp->es_client.ea_session = (void *)tc;
/* Add Challenge ID & type to verifier */
vals[0] = id;
vals[1] = EAPT_SRP;
t_clientaddexdata(tc, vals, 2);
}
Ap = t_clientgenexp(tc);
eap_srp_response(esp, id, EAPSRP_CKEY, Ap->data,
Ap->len);
break;
case EAPSRP_SKEY:
tc = (struct t_client *)esp->es_client.ea_session;
if (tc == NULL) {
warn("EAP: peer sent Subtype 2 without 1");
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
if (esp->es_client.ea_skey != NULL) {
/*
* ID number should not change here. Warn
* if it does (but otherwise ignore).
*/
if (id != esp->es_client.ea_id) {
warn("EAP: ID changed from %d to %d "
"in SRP Subtype 2 rexmit",
esp->es_client.ea_id, id);
}
} else {
if (get_srp_secret(esp->es_unit,
esp->es_client.ea_name,
esp->es_client.ea_peer, secret, 0) == 0) {
/*
* Can't work with this peer because
* the secret is missing. Just give
* up.
*/
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
Bval.data = inp;
Bval.len = len;
t_clientpasswd(tc, secret);
BZERO(secret, sizeof (secret));
esp->es_client.ea_skey =
t_clientgetkey(tc, &Bval);
if (esp->es_client.ea_skey == NULL) {
/* Server is rogue; stop now */
error("EAP: SRP server is rogue");
goto client_failure;
}
}
eap_srpval_response(esp, id, SRPVAL_EBIT,
t_clientresponse(tc));
break;
case EAPSRP_SVALIDATOR:
tc = (struct t_client *)esp->es_client.ea_session;
if (tc == NULL || esp->es_client.ea_skey == NULL) {
warn("EAP: peer sent Subtype 3 without 1/2");
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
/*
* If we're already open, then this ought to be a
* duplicate. Otherwise, check that the server is
* who we think it is.
*/
if (esp->es_client.ea_state == eapOpen) {
if (id != esp->es_client.ea_id) {
warn("EAP: ID changed from %d to %d "
"in SRP Subtype 3 rexmit",
esp->es_client.ea_id, id);
}
} else {
len -= sizeof (u_int32_t) + SHA_DIGESTSIZE;
if (len < 0 || t_clientverify(tc, inp +
sizeof (u_int32_t)) != 0) {
error("EAP: SRP server verification "
"failed");
goto client_failure;
}
GETLONG(esp->es_client.ea_keyflags, inp);
/* Save pseudonym if user wants it. */
if (len > 0 && esp->es_usepseudo) {
INCPTR(SHA_DIGESTSIZE, inp);
write_pseudonym(esp, inp, len, id);
}
}
/*
* We've verified our peer. We're now mostly done,
* except for waiting on the regular EAP Success
* message.
*/
eap_srp_response(esp, id, EAPSRP_ACK, NULL, 0);
break;
case EAPSRP_LWRECHALLENGE:
if (len < 4) {
warn("EAP: malformed Lightweight rechallenge");
return;
}
SHA1Init(&ctxt);
vals[0] = id;
SHA1Update(&ctxt, vals, 1);
SHA1Update(&ctxt, esp->es_client.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, inp, len);
SHA1Update(&ctxt, esp->es_client.ea_name,
esp->es_client.ea_namelen);
SHA1Final(dig, &ctxt);
eap_srp_response(esp, id, EAPSRP_LWRECHALLENGE, dig,
SHA_DIGESTSIZE);
break;
default:
error("EAP: unknown SRP Subtype %d", vallen);
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
break;
#endif /* USE_SRP */
default:
info("EAP: unknown authentication type %d; Naking", typenum);
eap_send_nak(esp, id, EAPT_SRP);
break;
}
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
TIMEOUT(eap_client_timeout, (void *)esp,
esp->es_client.ea_timeout);
}
return;
#ifdef USE_SRP
client_failure:
esp->es_client.ea_state = eapBadAuth;
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
esp->es_client.ea_session = NULL;
t_clientclose(tc);
auth_withpeer_fail(esp->es_unit, PPP_EAP);
#endif /* USE_SRP */
}
/*
* eap_response - Receive EAP Response message (server mode).
*/
static void
eap_response(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
u_char typenum;
u_char vallen;
int secret_len;
char secret[MAXSECRETLEN];
char rhostname[256];
MD5_CTX mdContext;
u_char hash[MD5_SIGNATURE_SIZE];
#ifdef USE_SRP
struct t_server *ts;
struct t_num A;
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
#endif /* USE_SRP */
if (esp->es_server.ea_id != id) {
dbglog("EAP: discarding Response %d; expected ID %d", id,
esp->es_server.ea_id);
return;
}
esp->es_server.ea_responses++;
if (len <= 0) {
error("EAP: empty Response message discarded");
return;
}
GETCHAR(typenum, inp);
len--;
switch (typenum) {
case EAPT_IDENTITY:
if (esp->es_server.ea_state != eapIdentify) {
dbglog("EAP discarding unwanted Identify \"%.q\"", len,
inp);
break;
}
info("EAP: unauthenticated peer name \"%.*q\"", len, inp);
if (esp->es_server.ea_peer != NULL &&
esp->es_server.ea_peer != remote_name)
free(esp->es_server.ea_peer);
esp->es_server.ea_peer = malloc(len + 1);
if (esp->es_server.ea_peer == NULL) {
esp->es_server.ea_peerlen = 0;
eap_figure_next_state(esp, 1);
break;
}
BCOPY(inp, esp->es_server.ea_peer, len);
esp->es_server.ea_peer[len] = '\0';
esp->es_server.ea_peerlen = len;
eap_figure_next_state(esp, 0);
break;
case EAPT_NOTIFICATION:
dbglog("EAP unexpected Notification; response discarded");
break;
case EAPT_NAK:
if (len < 1) {
info("EAP: Nak Response with no suggested protocol");
eap_figure_next_state(esp, 1);
break;
}
GETCHAR(vallen, inp);
len--;
if (!explicit_remote && esp->es_server.ea_state == eapIdentify){
/* Peer cannot Nak Identify Request */
eap_figure_next_state(esp, 1);
break;
}
switch (vallen) {
case EAPT_SRP:
/* Run through SRP validator selection again. */
esp->es_server.ea_state = eapIdentify;
eap_figure_next_state(esp, 0);
break;
case EAPT_MD5CHAP:
esp->es_server.ea_state = eapMD5Chall;
break;
default:
dbglog("EAP: peer requesting unknown Type %d", vallen);
switch (esp->es_server.ea_state) {
case eapSRP1:
case eapSRP2:
case eapSRP3:
esp->es_server.ea_state = eapMD5Chall;
break;
case eapMD5Chall:
case eapSRP4:
esp->es_server.ea_state = eapIdentify;
eap_figure_next_state(esp, 0);
break;
default:
break;
}
break;
}
break;
case EAPT_MD5CHAP:
if (esp->es_server.ea_state != eapMD5Chall) {
error("EAP: unexpected MD5-Response");
eap_figure_next_state(esp, 1);
break;
}
if (len < 1) {
error("EAP: received MD5-Response with no data");
eap_figure_next_state(esp, 1);
break;
}
GETCHAR(vallen, inp);
len--;
if (vallen != 16 || vallen > len) {
error("EAP: MD5-Response with bad length %d", vallen);
eap_figure_next_state(esp, 1);
break;
}
/* Not so likely to happen. */
if (len - vallen >= sizeof (rhostname)) {
dbglog("EAP: trimming really long peer name down");
BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
rhostname[sizeof (rhostname) - 1] = '\0';
} else {
BCOPY(inp + vallen, rhostname, len - vallen);
rhostname[len - vallen] = '\0';
}
/* In case the remote doesn't give us his name. */
if (explicit_remote ||
(remote_name[0] != '\0' && vallen == len))
strlcpy(rhostname, remote_name, sizeof (rhostname));
/*
* Get the secret for authenticating the specified
* host.
*/
if (!get_secret(esp->es_unit, rhostname,
esp->es_server.ea_name, secret, &secret_len, 1)) {
dbglog("EAP: no MD5 secret for auth of %q", rhostname);
eap_send_failure(esp);
break;
}
MD5_Init(&mdContext);
MD5_Update(&mdContext, &esp->es_server.ea_id, 1);
MD5_Update(&mdContext, (u_char *)secret, secret_len);
BZERO(secret, sizeof (secret));
MD5_Update(&mdContext, esp->es_challenge, esp->es_challen);
MD5_Final(hash, &mdContext);
if (BCMP(hash, inp, MD5_SIGNATURE_SIZE) != 0) {
eap_send_failure(esp);
break;
}
esp->es_server.ea_type = EAPT_MD5CHAP;
eap_send_success(esp);
eap_figure_next_state(esp, 0);
if (esp->es_rechallenge != 0)
TIMEOUT(eap_rechallenge, esp, esp->es_rechallenge);
break;
#ifdef USE_SRP
case EAPT_SRP:
if (len < 1) {
error("EAP: empty SRP Response");
eap_figure_next_state(esp, 1);
break;
}
GETCHAR(typenum, inp);
len--;
switch (typenum) {
case EAPSRP_CKEY:
if (esp->es_server.ea_state != eapSRP1) {
error("EAP: unexpected SRP Subtype 1 Response");
eap_figure_next_state(esp, 1);
break;
}
A.data = inp;
A.len = len;
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
esp->es_server.ea_skey = t_servergetkey(ts, &A);
if (esp->es_server.ea_skey == NULL) {
/* Client's A value is bogus; terminate now */
error("EAP: bogus A value from client");
eap_send_failure(esp);
} else {
eap_figure_next_state(esp, 0);
}
break;
case EAPSRP_CVALIDATOR:
if (esp->es_server.ea_state != eapSRP2) {
error("EAP: unexpected SRP Subtype 2 Response");
eap_figure_next_state(esp, 1);
break;
}
if (len < sizeof (u_int32_t) + SHA_DIGESTSIZE) {
error("EAP: M1 length %d < %d", len,
sizeof (u_int32_t) + SHA_DIGESTSIZE);
eap_figure_next_state(esp, 1);
break;
}
GETLONG(esp->es_server.ea_keyflags, inp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
if (t_serververify(ts, inp)) {
info("EAP: unable to validate client identity");
eap_send_failure(esp);
break;
}
eap_figure_next_state(esp, 0);
break;
case EAPSRP_ACK:
if (esp->es_server.ea_state != eapSRP3) {
error("EAP: unexpected SRP Subtype 3 Response");
eap_send_failure(esp);
break;
}
esp->es_server.ea_type = EAPT_SRP;
eap_send_success(esp);
eap_figure_next_state(esp, 0);
if (esp->es_rechallenge != 0)
TIMEOUT(eap_rechallenge, esp,
esp->es_rechallenge);
if (esp->es_lwrechallenge != 0)
TIMEOUT(srp_lwrechallenge, esp,
esp->es_lwrechallenge);
break;
case EAPSRP_LWRECHALLENGE:
if (esp->es_server.ea_state != eapSRP4) {
info("EAP: unexpected SRP Subtype 4 Response");
return;
}
if (len != SHA_DIGESTSIZE) {
error("EAP: bad Lightweight rechallenge "
"response");
return;
}
SHA1Init(&ctxt);
vallen = id;
SHA1Update(&ctxt, &vallen, 1);
SHA1Update(&ctxt, esp->es_server.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, esp->es_challenge, esp->es_challen);
SHA1Update(&ctxt, esp->es_server.ea_peer,
esp->es_server.ea_peerlen);
SHA1Final(dig, &ctxt);
if (BCMP(dig, inp, SHA_DIGESTSIZE) != 0) {
error("EAP: failed Lightweight rechallenge");
eap_send_failure(esp);
break;
}
esp->es_server.ea_state = eapOpen;
if (esp->es_lwrechallenge != 0)
TIMEOUT(srp_lwrechallenge, esp,
esp->es_lwrechallenge);
break;
}
break;
#endif /* USE_SRP */
default:
/* This can't happen. */
error("EAP: unknown Response type %d; ignored", typenum);
return;
}
if (esp->es_server.ea_timeout > 0) {
UNTIMEOUT(eap_server_timeout, (void *)esp);
}
if (esp->es_server.ea_state != eapBadAuth &&
esp->es_server.ea_state != eapOpen) {
esp->es_server.ea_id++;
eap_send_request(esp);
}
}
/*
* eap_success - Receive EAP Success message (client mode).
*/
static void
eap_success(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
if (esp->es_client.ea_state != eapOpen && !eap_client_active(esp)) {
dbglog("EAP unexpected success message in state %s (%d)",
eap_state_name(esp->es_client.ea_state),
esp->es_client.ea_state);
return;
}
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
if (len > 0) {
/* This is odd. The spec doesn't allow for this. */
PRINTMSG(inp, len);
}
esp->es_client.ea_state = eapOpen;
auth_withpeer_success(esp->es_unit, PPP_EAP, 0);
}
/*
* eap_failure - Receive EAP Failure message (client mode).
*/
static void
eap_failure(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
if (!eap_client_active(esp)) {
dbglog("EAP unexpected failure message in state %s (%d)",
eap_state_name(esp->es_client.ea_state),
esp->es_client.ea_state);
}
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
if (len > 0) {
/* This is odd. The spec doesn't allow for this. */
PRINTMSG(inp, len);
}
esp->es_client.ea_state = eapBadAuth;
error("EAP: peer reports authentication failure");
auth_withpeer_fail(esp->es_unit, PPP_EAP);
}
/*
* eap_input - Handle received EAP message.
*/
static void
eap_input(unit, inp, inlen)
int unit;
u_char *inp;
int inlen;
{
eap_state *esp = &eap_states[unit];
u_char code, id;
int len;
/*
* Parse header (code, id and length). If packet too short,
* drop it.
*/
if (inlen < EAP_HEADERLEN) {
error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN);
return;
}
GETCHAR(code, inp);
GETCHAR(id, inp);
GETSHORT(len, inp);
if (len < EAP_HEADERLEN || len > inlen) {
error("EAP: packet has illegal length field %d (%d..%d)", len,
EAP_HEADERLEN, inlen);
return;
}
len -= EAP_HEADERLEN;
/* Dispatch based on message code */
switch (code) {
case EAP_REQUEST:
eap_request(esp, inp, id, len);
break;
case EAP_RESPONSE:
eap_response(esp, inp, id, len);
break;
case EAP_SUCCESS:
eap_success(esp, inp, id, len);
break;
case EAP_FAILURE:
eap_failure(esp, inp, id, len);
break;
default: /* XXX Need code reject */
/* Note: it's not legal to send EAP Nak here. */
warn("EAP: unknown code %d received", code);
break;
}
}
/*
* eap_printpkt - print the contents of an EAP packet.
*/
static char *eap_codenames[] = {
"Request", "Response", "Success", "Failure"
};
static char *eap_typenames[] = {
"Identity", "Notification", "Nak", "MD5-Challenge",
"OTP", "Generic-Token", NULL, NULL,
"RSA", "DSS", "KEA", "KEA-Validate",
"TLS", "Defender", "Windows 2000", "Arcot",
"Cisco", "Nokia", "SRP"
};
static int
eap_printpkt(inp, inlen, printer, arg)
u_char *inp;
int inlen;
void (*printer) __P((void *, char *, ...));
void *arg;
{
int code, id, len, rtype, vallen;
u_char *pstart;
u_int32_t uval;
if (inlen < EAP_HEADERLEN)
return (0);
pstart = inp;
GETCHAR(code, inp);
GETCHAR(id, inp);
GETSHORT(len, inp);
if (len < EAP_HEADERLEN || len > inlen)
return (0);
if (code >= 1 && code <= sizeof(eap_codenames) / sizeof(char *))
printer(arg, " %s", eap_codenames[code-1]);
else
printer(arg, " code=0x%x", code);
printer(arg, " id=0x%x", id);
len -= EAP_HEADERLEN;
switch (code) {
case EAP_REQUEST:
if (len < 1) {
printer(arg, " <missing type>");
break;
}
GETCHAR(rtype, inp);
len--;
if (rtype >= 1 &&
rtype <= sizeof (eap_typenames) / sizeof (char *))
printer(arg, " %s", eap_typenames[rtype-1]);
else
printer(arg, " type=0x%x", rtype);
switch (rtype) {
case EAPT_IDENTITY:
case EAPT_NOTIFICATION:
if (len > 0) {
printer(arg, " <Message ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
} else {
printer(arg, " <No message>");
}
break;
case EAPT_MD5CHAP:
if (len <= 0)
break;
GETCHAR(vallen, inp);
len--;
if (vallen > len)
goto truncated;
printer(arg, " <Value%.*B>", vallen, inp);
INCPTR(vallen, inp);
len -= vallen;
if (len > 0) {
printer(arg, " <Name ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
} else {
printer(arg, " <No name>");
}
break;
case EAPT_SRP:
if (len < 3)
goto truncated;
GETCHAR(vallen, inp);
len--;
printer(arg, "-%d", vallen);
switch (vallen) {
case EAPSRP_CHALLENGE:
GETCHAR(vallen, inp);
len--;
if (vallen >= len)
goto truncated;
if (vallen > 0) {
printer(arg, " <Name ");
print_string((char *)inp, vallen, printer,
arg);
printer(arg, ">");
} else {
printer(arg, " <No name>");
}
INCPTR(vallen, inp);
len -= vallen;
GETCHAR(vallen, inp);
len--;
if (vallen >= len)
goto truncated;
printer(arg, " <s%.*B>", vallen, inp);
INCPTR(vallen, inp);
len -= vallen;
GETCHAR(vallen, inp);
len--;
if (vallen > len)
goto truncated;
if (vallen == 0) {
printer(arg, " <Default g=2>");
} else {
printer(arg, " <g%.*B>", vallen, inp);
}
INCPTR(vallen, inp);
len -= vallen;
if (len == 0) {
printer(arg, " <Default N>");
} else {
printer(arg, " <N%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
}
break;
case EAPSRP_SKEY:
printer(arg, " <B%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
break;
case EAPSRP_SVALIDATOR:
if (len < sizeof (u_int32_t))
break;
GETLONG(uval, inp);
len -= sizeof (u_int32_t);
if (uval & SRPVAL_EBIT) {
printer(arg, " E");
uval &= ~SRPVAL_EBIT;
}
if (uval != 0) {
printer(arg, " f<%X>", uval);
}
if ((vallen = len) > SHA_DIGESTSIZE)
vallen = SHA_DIGESTSIZE;
printer(arg, " <M2%.*B%s>", len, inp,
len < SHA_DIGESTSIZE ? "?" : "");
INCPTR(vallen, inp);
len -= vallen;
if (len > 0) {
printer(arg, " <PN%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
}
break;
case EAPSRP_LWRECHALLENGE:
printer(arg, " <Challenge%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
break;
}
break;
}
break;
case EAP_RESPONSE:
if (len < 1)
break;
GETCHAR(rtype, inp);
len--;
if (rtype >= 1 &&
rtype <= sizeof (eap_typenames) / sizeof (char *))
printer(arg, " %s", eap_typenames[rtype-1]);
else
printer(arg, " type=0x%x", rtype);
switch (rtype) {
case EAPT_IDENTITY:
if (len > 0) {
printer(arg, " <Name ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
}
break;
case EAPT_NAK:
if (len <= 0) {
printer(arg, " <missing hint>");
break;
}
GETCHAR(rtype, inp);
len--;
printer(arg, " <Suggested-type %02X", rtype);
if (rtype >= 1 &&
rtype < sizeof (eap_typenames) / sizeof (char *))
printer(arg, " (%s)", eap_typenames[rtype-1]);
printer(arg, ">");
break;
case EAPT_MD5CHAP:
if (len <= 0) {
printer(arg, " <missing length>");
break;
}
GETCHAR(vallen, inp);
len--;
if (vallen > len)
goto truncated;
printer(arg, " <Value%.*B>", vallen, inp);
INCPTR(vallen, inp);
len -= vallen;
if (len > 0) {
printer(arg, " <Name ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
} else {
printer(arg, " <No name>");
}
break;
case EAPT_SRP:
if (len < 1)
goto truncated;
GETCHAR(vallen, inp);
len--;
printer(arg, "-%d", vallen);
switch (vallen) {
case EAPSRP_CKEY:
printer(arg, " <A%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
break;
case EAPSRP_CVALIDATOR:
if (len < sizeof (u_int32_t))
break;
GETLONG(uval, inp);
len -= sizeof (u_int32_t);
if (uval & SRPVAL_EBIT) {
printer(arg, " E");
uval &= ~SRPVAL_EBIT;
}
if (uval != 0) {
printer(arg, " f<%X>", uval);
}
printer(arg, " <M1%.*B%s>", len, inp,
len == SHA_DIGESTSIZE ? "" : "?");
INCPTR(len, inp);
len = 0;
break;
case EAPSRP_ACK:
break;
case EAPSRP_LWRECHALLENGE:
printer(arg, " <Response%.*B%s>", len, inp,
len == SHA_DIGESTSIZE ? "" : "?");
if ((vallen = len) > SHA_DIGESTSIZE)
vallen = SHA_DIGESTSIZE;
INCPTR(vallen, inp);
len -= vallen;
break;
}
break;
}
break;
case EAP_SUCCESS: /* No payload expected for these! */
case EAP_FAILURE:
break;
truncated:
printer(arg, " <truncated>");
break;
}
if (len > 8)
printer(arg, "%8B...", inp);
else if (len > 0)
printer(arg, "%.*B", len, inp);
INCPTR(len, inp);
return (inp - pstart);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4658_0 |
crossvul-cpp_data_bad_3862_0 | /*
* Copyright (c) 2018 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/** @file mqtt_decoder.c
*
* @brief Decoder functions needed for decoding packets received from the
* broker.
*/
#include <logging/log.h>
LOG_MODULE_REGISTER(net_mqtt_dec, CONFIG_MQTT_LOG_LEVEL);
#include "mqtt_internal.h"
#include "mqtt_os.h"
/**
* @brief Unpacks unsigned 8 bit value from the buffer from the offset
* requested.
*
* @param[inout] buf A pointer to the buf_ctx structure containing current
* buffer position.
* @param[out] val Memory where the value is to be unpacked.
*
* @retval 0 if the procedure is successful.
* @retval -EINVAL if the buffer would be exceeded during the read
*/
static int unpack_uint8(struct buf_ctx *buf, u8_t *val)
{
MQTT_TRC(">> cur:%p, end:%p", buf->cur, buf->end);
if ((buf->end - buf->cur) < sizeof(u8_t)) {
return -EINVAL;
}
*val = *(buf->cur++);
MQTT_TRC("<< val:%02x", *val);
return 0;
}
/**
* @brief Unpacks unsigned 16 bit value from the buffer from the offset
* requested.
*
* @param[inout] buf A pointer to the buf_ctx structure containing current
* buffer position.
* @param[out] val Memory where the value is to be unpacked.
*
* @retval 0 if the procedure is successful.
* @retval -EINVAL if the buffer would be exceeded during the read
*/
static int unpack_uint16(struct buf_ctx *buf, u16_t *val)
{
MQTT_TRC(">> cur:%p, end:%p", buf->cur, buf->end);
if ((buf->end - buf->cur) < sizeof(u16_t)) {
return -EINVAL;
}
*val = *(buf->cur++) << 8; /* MSB */
*val |= *(buf->cur++); /* LSB */
MQTT_TRC("<< val:%04x", *val);
return 0;
}
/**
* @brief Unpacks utf8 string from the buffer from the offset requested.
*
* @param[inout] buf A pointer to the buf_ctx structure containing current
* buffer position.
* @param[out] str Pointer to a string that will hold the string location
* in the buffer.
*
* @retval 0 if the procedure is successful.
* @retval -EINVAL if the buffer would be exceeded during the read
*/
static int unpack_utf8_str(struct buf_ctx *buf, struct mqtt_utf8 *str)
{
u16_t utf8_strlen;
int err_code;
MQTT_TRC(">> cur:%p, end:%p", buf->cur, buf->end);
err_code = unpack_uint16(buf, &utf8_strlen);
if (err_code != 0) {
return err_code;
}
if ((buf->end - buf->cur) < utf8_strlen) {
return -EINVAL;
}
str->size = utf8_strlen;
/* Zero length UTF8 strings permitted. */
if (utf8_strlen) {
/* Point to right location in buffer. */
str->utf8 = buf->cur;
buf->cur += utf8_strlen;
} else {
str->utf8 = NULL;
}
MQTT_TRC("<< str_size:%08x", (u32_t)GET_UT8STR_BUFFER_SIZE(str));
return 0;
}
/**
* @brief Unpacks binary string from the buffer from the offset requested.
*
* @param[in] length Binary string length.
* @param[inout] buf A pointer to the buf_ctx structure containing current
* buffer position.
* @param[out] str Pointer to a binary string that will hold the binary string
* location in the buffer.
*
* @retval 0 if the procedure is successful.
* @retval -EINVAL if the buffer would be exceeded during the read
*/
static int unpack_data(u32_t length, struct buf_ctx *buf,
struct mqtt_binstr *str)
{
MQTT_TRC(">> cur:%p, end:%p", buf->cur, buf->end);
if ((buf->end - buf->cur) < length) {
return -EINVAL;
}
str->len = length;
/* Zero length binary strings are permitted. */
if (length > 0) {
str->data = buf->cur;
buf->cur += length;
} else {
str->data = NULL;
}
MQTT_TRC("<< bin len:%08x", GET_BINSTR_BUFFER_SIZE(str));
return 0;
}
/**@brief Decode MQTT Packet Length in the MQTT fixed header.
*
* @param[inout] buf A pointer to the buf_ctx structure containing current
* buffer position.
* @param[out] length Length of variable header and payload in the
* MQTT message.
*
* @retval 0 if the procedure is successful.
* @retval -EINVAL if the length decoding would use more that 4 bytes.
* @retval -EAGAIN if the buffer would be exceeded during the read.
*/
static int packet_length_decode(struct buf_ctx *buf, u32_t *length)
{
u8_t shift = 0U;
u8_t bytes = 0U;
*length = 0U;
do {
if (bytes >= MQTT_MAX_LENGTH_BYTES) {
return -EINVAL;
}
if (buf->cur >= buf->end) {
return -EAGAIN;
}
*length += ((u32_t)*(buf->cur) & MQTT_LENGTH_VALUE_MASK)
<< shift;
shift += MQTT_LENGTH_SHIFT;
bytes++;
} while ((*(buf->cur++) & MQTT_LENGTH_CONTINUATION_BIT) != 0U);
if (*length > MQTT_MAX_PAYLOAD_SIZE) {
return -EINVAL;
}
MQTT_TRC("length:0x%08x", *length);
return 0;
}
int fixed_header_decode(struct buf_ctx *buf, u8_t *type_and_flags,
u32_t *length)
{
int err_code;
err_code = unpack_uint8(buf, type_and_flags);
if (err_code != 0) {
return err_code;
}
return packet_length_decode(buf, length);
}
int connect_ack_decode(const struct mqtt_client *client, struct buf_ctx *buf,
struct mqtt_connack_param *param)
{
int err_code;
u8_t flags, ret_code;
err_code = unpack_uint8(buf, &flags);
if (err_code != 0) {
return err_code;
}
err_code = unpack_uint8(buf, &ret_code);
if (err_code != 0) {
return err_code;
}
if (client->protocol_version == MQTT_VERSION_3_1_1) {
param->session_present_flag =
flags & MQTT_CONNACK_FLAG_SESSION_PRESENT;
MQTT_TRC("[CID %p]: session_present_flag: %d", client,
param->session_present_flag);
}
param->return_code = (enum mqtt_conn_return_code)ret_code;
return 0;
}
int publish_decode(u8_t flags, u32_t var_length, struct buf_ctx *buf,
struct mqtt_publish_param *param)
{
int err_code;
u32_t var_header_length;
param->dup_flag = flags & MQTT_HEADER_DUP_MASK;
param->retain_flag = flags & MQTT_HEADER_RETAIN_MASK;
param->message.topic.qos = ((flags & MQTT_HEADER_QOS_MASK) >> 1);
err_code = unpack_utf8_str(buf, ¶m->message.topic.topic);
if (err_code != 0) {
return err_code;
}
var_header_length = param->message.topic.topic.size + sizeof(u16_t);
if (param->message.topic.qos > MQTT_QOS_0_AT_MOST_ONCE) {
err_code = unpack_uint16(buf, ¶m->message_id);
if (err_code != 0) {
return err_code;
}
var_header_length += sizeof(u16_t);
}
param->message.payload.data = NULL;
param->message.payload.len = var_length - var_header_length;
return 0;
}
int publish_ack_decode(struct buf_ctx *buf, struct mqtt_puback_param *param)
{
return unpack_uint16(buf, ¶m->message_id);
}
int publish_receive_decode(struct buf_ctx *buf, struct mqtt_pubrec_param *param)
{
return unpack_uint16(buf, ¶m->message_id);
}
int publish_release_decode(struct buf_ctx *buf, struct mqtt_pubrel_param *param)
{
return unpack_uint16(buf, ¶m->message_id);
}
int publish_complete_decode(struct buf_ctx *buf,
struct mqtt_pubcomp_param *param)
{
return unpack_uint16(buf, ¶m->message_id);
}
int subscribe_ack_decode(struct buf_ctx *buf, struct mqtt_suback_param *param)
{
int err_code;
err_code = unpack_uint16(buf, ¶m->message_id);
if (err_code != 0) {
return err_code;
}
return unpack_data(buf->end - buf->cur, buf, ¶m->return_codes);
}
int unsubscribe_ack_decode(struct buf_ctx *buf,
struct mqtt_unsuback_param *param)
{
return unpack_uint16(buf, ¶m->message_id);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3862_0 |
crossvul-cpp_data_good_287_0 | /*
* Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 2000
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution, and (3) all advertising materials mentioning
* features or use of this software display the following acknowledgement:
* ``This product includes software developed by the University of California,
* Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
* the University nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Support for splitting captures into multiple files with a maximum
* file size:
*
* Copyright (c) 2001
* Seth Webster <swebster@sst.ll.mit.edu>
*/
#ifndef lint
static const char copyright[] _U_ =
"@(#) Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 2000\n\
The Regents of the University of California. All rights reserved.\n";
#endif
/*
* tcpdump - dump traffic on a network
*
* First written in 1987 by Van Jacobson, Lawrence Berkeley Laboratory.
* Mercilessly hacked and occasionally improved since then via the
* combined efforts of Van, Steve McCanne and Craig Leres of LBL.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/*
* Mac OS X may ship pcap.h from libpcap 0.6 with a libpcap based on
* 0.8. That means it has pcap_findalldevs() but the header doesn't
* define pcap_if_t, meaning that we can't actually *use* pcap_findalldevs().
*/
#ifdef HAVE_PCAP_FINDALLDEVS
#ifndef HAVE_PCAP_IF_T
#undef HAVE_PCAP_FINDALLDEVS
#endif
#endif
#include <netdissect-stdinc.h>
#include <sys/stat.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_LIBCRYPTO
#include <openssl/crypto.h>
#endif
#ifdef HAVE_GETOPT_LONG
#include <getopt.h>
#else
#include "getopt_long.h"
#endif
/* Capsicum-specific code requires macros from <net/bpf.h>, which will fail
* to compile if <pcap.h> has already been included; including the headers
* in the opposite order works fine.
*/
#ifdef HAVE_CAPSICUM
#include <sys/capability.h>
#include <sys/ioccom.h>
#include <net/bpf.h>
#include <libgen.h>
#endif /* HAVE_CAPSICUM */
#include <pcap.h>
#include <signal.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#ifndef _WIN32
#include <sys/wait.h>
#include <sys/resource.h>
#include <pwd.h>
#include <grp.h>
#endif /* _WIN32 */
/* capabilities convenience library */
/* If a code depends on HAVE_LIBCAP_NG, it depends also on HAVE_CAP_NG_H.
* If HAVE_CAP_NG_H is not defined, undefine HAVE_LIBCAP_NG.
* Thus, the later tests are done only on HAVE_LIBCAP_NG.
*/
#ifdef HAVE_LIBCAP_NG
#ifdef HAVE_CAP_NG_H
#include <cap-ng.h>
#else
#undef HAVE_LIBCAP_NG
#endif /* HAVE_CAP_NG_H */
#endif /* HAVE_LIBCAP_NG */
#include "netdissect.h"
#include "interface.h"
#include "addrtoname.h"
#include "machdep.h"
#include "setsignal.h"
#include "gmt2local.h"
#include "pcap-missing.h"
#include "ascii_strcasecmp.h"
#include "print.h"
#ifndef PATH_MAX
#define PATH_MAX 1024
#endif
#ifdef SIGINFO
#define SIGNAL_REQ_INFO SIGINFO
#elif SIGUSR1
#define SIGNAL_REQ_INFO SIGUSR1
#endif
static int Bflag; /* buffer size */
static long Cflag; /* rotate dump files after this many bytes */
static int Cflag_count; /* Keep track of which file number we're writing */
static int Dflag; /* list available devices and exit */
/*
* This is exported because, in some versions of libpcap, if libpcap
* is built with optimizer debugging code (which is *NOT* the default
* configuration!), the library *imports*(!) a variable named dflag,
* under the expectation that tcpdump is exporting it, to govern
* how much debugging information to print when optimizing
* the generated BPF code.
*
* This is a horrible hack; newer versions of libpcap don't import
* dflag but, instead, *if* built with optimizer debugging code,
* *export* a routine to set that flag.
*/
int dflag; /* print filter code */
static int Gflag; /* rotate dump files after this many seconds */
static int Gflag_count; /* number of files created with Gflag rotation */
static time_t Gflag_time; /* The last time_t the dump file was rotated. */
static int Lflag; /* list available data link types and exit */
static int Iflag; /* rfmon (monitor) mode */
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
static int Jflag; /* list available time stamp types */
#endif
static int jflag = -1; /* packet time stamp source */
static int pflag; /* don't go promiscuous */
#ifdef HAVE_PCAP_SETDIRECTION
static int Qflag = -1; /* restrict captured packet by send/receive direction */
#endif
static int Uflag; /* "unbuffered" output of dump files */
static int Wflag; /* recycle output files after this number of files */
static int WflagChars;
static char *zflag = NULL; /* compress each savefile using a specified command (like gzip or bzip2) */
static int immediate_mode;
static int infodelay;
static int infoprint;
char *program_name;
/* Forwards */
static void error(FORMAT_STRING(const char *), ...) NORETURN PRINTFLIKE(1, 2);
static void warning(FORMAT_STRING(const char *), ...) PRINTFLIKE(1, 2);
static void exit_tcpdump(int) NORETURN;
static RETSIGTYPE cleanup(int);
static RETSIGTYPE child_cleanup(int);
static void print_version(void);
static void print_usage(void);
static void show_tstamp_types_and_exit(pcap_t *, const char *device) NORETURN;
static void show_dlts_and_exit(pcap_t *, const char *device) NORETURN;
#ifdef HAVE_PCAP_FINDALLDEVS
static void show_devices_and_exit (void) NORETURN;
#endif
static void print_packet(u_char *, const struct pcap_pkthdr *, const u_char *);
static void dump_packet_and_trunc(u_char *, const struct pcap_pkthdr *, const u_char *);
static void dump_packet(u_char *, const struct pcap_pkthdr *, const u_char *);
static void droproot(const char *, const char *);
#ifdef SIGNAL_REQ_INFO
RETSIGTYPE requestinfo(int);
#endif
#if defined(USE_WIN32_MM_TIMER)
#include <MMsystem.h>
static UINT timer_id;
static void CALLBACK verbose_stats_dump(UINT, UINT, DWORD_PTR, DWORD_PTR, DWORD_PTR);
#elif defined(HAVE_ALARM)
static void verbose_stats_dump(int sig);
#endif
static void info(int);
static u_int packets_captured;
#ifdef HAVE_PCAP_FINDALLDEVS
static const struct tok status_flags[] = {
#ifdef PCAP_IF_UP
{ PCAP_IF_UP, "Up" },
#endif
#ifdef PCAP_IF_RUNNING
{ PCAP_IF_RUNNING, "Running" },
#endif
{ PCAP_IF_LOOPBACK, "Loopback" },
{ 0, NULL }
};
#endif
static pcap_t *pd;
static int supports_monitor_mode;
extern int optind;
extern int opterr;
extern char *optarg;
struct dump_info {
char *WFileName;
char *CurrentFileName;
pcap_t *pd;
pcap_dumper_t *p;
#ifdef HAVE_CAPSICUM
int dirfd;
#endif
};
#if defined(HAVE_PCAP_SET_PARSER_DEBUG)
/*
* We have pcap_set_parser_debug() in libpcap; declare it (it's not declared
* by any libpcap header, because it's a special hack, only available if
* libpcap was configured to include it, and only intended for use by
* libpcap developers trying to debug the parser for filter expressions).
*/
#ifdef _WIN32
__declspec(dllimport)
#else /* _WIN32 */
extern
#endif /* _WIN32 */
void pcap_set_parser_debug(int);
#elif defined(HAVE_PCAP_DEBUG) || defined(HAVE_YYDEBUG)
/*
* We don't have pcap_set_parser_debug() in libpcap, but we do have
* pcap_debug or yydebug. Make a local version of pcap_set_parser_debug()
* to set the flag, and define HAVE_PCAP_SET_PARSER_DEBUG.
*/
static void
pcap_set_parser_debug(int value)
{
#ifdef HAVE_PCAP_DEBUG
extern int pcap_debug;
pcap_debug = value;
#else /* HAVE_PCAP_DEBUG */
extern int yydebug;
yydebug = value;
#endif /* HAVE_PCAP_DEBUG */
}
#define HAVE_PCAP_SET_PARSER_DEBUG
#endif
#if defined(HAVE_PCAP_SET_OPTIMIZER_DEBUG)
/*
* We have pcap_set_optimizer_debug() in libpcap; declare it (it's not declared
* by any libpcap header, because it's a special hack, only available if
* libpcap was configured to include it, and only intended for use by
* libpcap developers trying to debug the optimizer for filter expressions).
*/
#ifdef _WIN32
__declspec(dllimport)
#else /* _WIN32 */
extern
#endif /* _WIN32 */
void pcap_set_optimizer_debug(int);
#endif
/* VARARGS */
static void
error(const char *fmt, ...)
{
va_list ap;
(void)fprintf(stderr, "%s: ", program_name);
va_start(ap, fmt);
(void)vfprintf(stderr, fmt, ap);
va_end(ap);
if (*fmt) {
fmt += strlen(fmt);
if (fmt[-1] != '\n')
(void)fputc('\n', stderr);
}
exit_tcpdump(1);
/* NOTREACHED */
}
/* VARARGS */
static void
warning(const char *fmt, ...)
{
va_list ap;
(void)fprintf(stderr, "%s: WARNING: ", program_name);
va_start(ap, fmt);
(void)vfprintf(stderr, fmt, ap);
va_end(ap);
if (*fmt) {
fmt += strlen(fmt);
if (fmt[-1] != '\n')
(void)fputc('\n', stderr);
}
}
static void
exit_tcpdump(int status)
{
nd_cleanup();
exit(status);
}
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
static void
show_tstamp_types_and_exit(pcap_t *pc, const char *device)
{
int n_tstamp_types;
int *tstamp_types = 0;
const char *tstamp_type_name;
int i;
n_tstamp_types = pcap_list_tstamp_types(pc, &tstamp_types);
if (n_tstamp_types < 0)
error("%s", pcap_geterr(pc));
if (n_tstamp_types == 0) {
fprintf(stderr, "Time stamp type cannot be set for %s\n",
device);
exit_tcpdump(0);
}
fprintf(stderr, "Time stamp types for %s (use option -j to set):\n",
device);
for (i = 0; i < n_tstamp_types; i++) {
tstamp_type_name = pcap_tstamp_type_val_to_name(tstamp_types[i]);
if (tstamp_type_name != NULL) {
(void) fprintf(stderr, " %s (%s)\n", tstamp_type_name,
pcap_tstamp_type_val_to_description(tstamp_types[i]));
} else {
(void) fprintf(stderr, " %d\n", tstamp_types[i]);
}
}
pcap_free_tstamp_types(tstamp_types);
exit_tcpdump(0);
}
#endif
static void
show_dlts_and_exit(pcap_t *pc, const char *device)
{
int n_dlts, i;
int *dlts = 0;
const char *dlt_name;
n_dlts = pcap_list_datalinks(pc, &dlts);
if (n_dlts < 0)
error("%s", pcap_geterr(pc));
else if (n_dlts == 0 || !dlts)
error("No data link types.");
/*
* If the interface is known to support monitor mode, indicate
* whether these are the data link types available when not in
* monitor mode, if -I wasn't specified, or when in monitor mode,
* when -I was specified (the link-layer types available in
* monitor mode might be different from the ones available when
* not in monitor mode).
*/
if (supports_monitor_mode)
(void) fprintf(stderr, "Data link types for %s %s (use option -y to set):\n",
device,
Iflag ? "when in monitor mode" : "when not in monitor mode");
else
(void) fprintf(stderr, "Data link types for %s (use option -y to set):\n",
device);
for (i = 0; i < n_dlts; i++) {
dlt_name = pcap_datalink_val_to_name(dlts[i]);
if (dlt_name != NULL) {
(void) fprintf(stderr, " %s (%s)", dlt_name,
pcap_datalink_val_to_description(dlts[i]));
/*
* OK, does tcpdump handle that type?
*/
if (!has_printer(dlts[i]))
(void) fprintf(stderr, " (printing not supported)");
fprintf(stderr, "\n");
} else {
(void) fprintf(stderr, " DLT %d (printing not supported)\n",
dlts[i]);
}
}
#ifdef HAVE_PCAP_FREE_DATALINKS
pcap_free_datalinks(dlts);
#endif
exit_tcpdump(0);
}
#ifdef HAVE_PCAP_FINDALLDEVS
static void
show_devices_and_exit (void)
{
pcap_if_t *dev, *devlist;
char ebuf[PCAP_ERRBUF_SIZE];
int i;
if (pcap_findalldevs(&devlist, ebuf) < 0)
error("%s", ebuf);
for (i = 0, dev = devlist; dev != NULL; i++, dev = dev->next) {
printf("%d.%s", i+1, dev->name);
if (dev->description != NULL)
printf(" (%s)", dev->description);
if (dev->flags != 0)
printf(" [%s]", bittok2str(status_flags, "none", dev->flags));
printf("\n");
}
pcap_freealldevs(devlist);
exit_tcpdump(0);
}
#endif /* HAVE_PCAP_FINDALLDEVS */
/*
* Short options.
*
* Note that there we use all letters for short options except for g, k,
* o, and P, and those are used by other versions of tcpdump, and we should
* only use them for the same purposes that the other versions of tcpdump
* use them:
*
* OS X tcpdump uses -g to force non--v output for IP to be on one
* line, making it more "g"repable;
*
* OS X tcpdump uses -k to specify that packet comments in pcap-ng files
* should be printed;
*
* OpenBSD tcpdump uses -o to indicate that OS fingerprinting should be done
* for hosts sending TCP SYN packets;
*
* OS X tcpdump uses -P to indicate that -w should write pcap-ng rather
* than pcap files.
*
* OS X tcpdump also uses -Q to specify expressions that match packet
* metadata, including but not limited to the packet direction.
* The expression syntax is different from a simple "in|out|inout",
* and those expressions aren't accepted by OS X tcpdump, but the
* equivalents would be "in" = "dir=in", "out" = "dir=out", and
* "inout" = "dir=in or dir=out", and the parser could conceivably
* special-case "in", "out", and "inout" as expressions for backwards
* compatibility, so all is not (yet) lost.
*/
/*
* Set up flags that might or might not be supported depending on the
* version of libpcap we're using.
*/
#if defined(HAVE_PCAP_CREATE) || defined(_WIN32)
#define B_FLAG "B:"
#define B_FLAG_USAGE " [ -B size ]"
#else /* defined(HAVE_PCAP_CREATE) || defined(_WIN32) */
#define B_FLAG
#define B_FLAG_USAGE
#endif /* defined(HAVE_PCAP_CREATE) || defined(_WIN32) */
#ifdef HAVE_PCAP_CREATE
#define I_FLAG "I"
#else /* HAVE_PCAP_CREATE */
#define I_FLAG
#endif /* HAVE_PCAP_CREATE */
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
#define j_FLAG "j:"
#define j_FLAG_USAGE " [ -j tstamptype ]"
#define J_FLAG "J"
#else /* PCAP_ERROR_TSTAMP_TYPE_NOTSUP */
#define j_FLAG
#define j_FLAG_USAGE
#define J_FLAG
#endif /* PCAP_ERROR_TSTAMP_TYPE_NOTSUP */
#ifdef HAVE_PCAP_FINDALLDEVS
#define D_FLAG "D"
#else
#define D_FLAG
#endif
#ifdef HAVE_PCAP_DUMP_FLUSH
#define U_FLAG "U"
#else
#define U_FLAG
#endif
#ifdef HAVE_PCAP_SETDIRECTION
#define Q_FLAG "Q:"
#else
#define Q_FLAG
#endif
#define SHORTOPTS "aAb" B_FLAG "c:C:d" D_FLAG "eE:fF:G:hHi:" I_FLAG j_FLAG J_FLAG "KlLm:M:nNOpq" Q_FLAG "r:s:StT:u" U_FLAG "vV:w:W:xXy:Yz:Z:#"
/*
* Long options.
*
* We do not currently have long options corresponding to all short
* options; we should probably pick appropriate option names for them.
*
* However, the short options where the number of times the option is
* specified matters, such as -v and -d and -t, should probably not
* just map to a long option, as saying
*
* tcpdump --verbose --verbose
*
* doesn't make sense; it should be --verbosity={N} or something such
* as that.
*
* For long options with no corresponding short options, we define values
* outside the range of ASCII graphic characters, make that the last
* component of the entry for the long option, and have a case for that
* option in the switch statement.
*/
#define OPTION_VERSION 128
#define OPTION_TSTAMP_PRECISION 129
#define OPTION_IMMEDIATE_MODE 130
static const struct option longopts[] = {
#if defined(HAVE_PCAP_CREATE) || defined(_WIN32)
{ "buffer-size", required_argument, NULL, 'B' },
#endif
{ "list-interfaces", no_argument, NULL, 'D' },
{ "help", no_argument, NULL, 'h' },
{ "interface", required_argument, NULL, 'i' },
#ifdef HAVE_PCAP_CREATE
{ "monitor-mode", no_argument, NULL, 'I' },
#endif
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
{ "time-stamp-type", required_argument, NULL, 'j' },
{ "list-time-stamp-types", no_argument, NULL, 'J' },
#endif
#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
{ "time-stamp-precision", required_argument, NULL, OPTION_TSTAMP_PRECISION},
#endif
{ "dont-verify-checksums", no_argument, NULL, 'K' },
{ "list-data-link-types", no_argument, NULL, 'L' },
{ "no-optimize", no_argument, NULL, 'O' },
{ "no-promiscuous-mode", no_argument, NULL, 'p' },
#ifdef HAVE_PCAP_SETDIRECTION
{ "direction", required_argument, NULL, 'Q' },
#endif
{ "snapshot-length", required_argument, NULL, 's' },
{ "absolute-tcp-sequence-numbers", no_argument, NULL, 'S' },
#ifdef HAVE_PCAP_DUMP_FLUSH
{ "packet-buffered", no_argument, NULL, 'U' },
#endif
{ "linktype", required_argument, NULL, 'y' },
#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
{ "immediate-mode", no_argument, NULL, OPTION_IMMEDIATE_MODE },
#endif
#ifdef HAVE_PCAP_SET_PARSER_DEBUG
{ "debug-filter-parser", no_argument, NULL, 'Y' },
#endif
{ "relinquish-privileges", required_argument, NULL, 'Z' },
{ "number", no_argument, NULL, '#' },
{ "version", no_argument, NULL, OPTION_VERSION },
{ NULL, 0, NULL, 0 }
};
#ifndef _WIN32
/* Drop root privileges and chroot if necessary */
static void
droproot(const char *username, const char *chroot_dir)
{
struct passwd *pw = NULL;
if (chroot_dir && !username) {
fprintf(stderr, "%s: Chroot without dropping root is insecure\n",
program_name);
exit_tcpdump(1);
}
pw = getpwnam(username);
if (pw) {
if (chroot_dir) {
if (chroot(chroot_dir) != 0 || chdir ("/") != 0) {
fprintf(stderr, "%s: Couldn't chroot/chdir to '%.64s': %s\n",
program_name, chroot_dir, pcap_strerror(errno));
exit_tcpdump(1);
}
}
#ifdef HAVE_LIBCAP_NG
{
int ret = capng_change_id(pw->pw_uid, pw->pw_gid, CAPNG_NO_FLAG);
if (ret < 0)
error("capng_change_id(): return %d\n", ret);
else
fprintf(stderr, "dropped privs to %s\n", username);
}
#else
if (initgroups(pw->pw_name, pw->pw_gid) != 0 ||
setgid(pw->pw_gid) != 0 || setuid(pw->pw_uid) != 0) {
fprintf(stderr, "%s: Couldn't change to '%.32s' uid=%lu gid=%lu: %s\n",
program_name, username,
(unsigned long)pw->pw_uid,
(unsigned long)pw->pw_gid,
pcap_strerror(errno));
exit_tcpdump(1);
}
else {
fprintf(stderr, "dropped privs to %s\n", username);
}
#endif /* HAVE_LIBCAP_NG */
}
else {
fprintf(stderr, "%s: Couldn't find user '%.32s'\n",
program_name, username);
exit_tcpdump(1);
}
#ifdef HAVE_LIBCAP_NG
/* We don't need CAP_SETUID, CAP_SETGID and CAP_SYS_CHROOT any more. */
capng_updatev(
CAPNG_DROP,
CAPNG_EFFECTIVE | CAPNG_PERMITTED,
CAP_SETUID,
CAP_SETGID,
CAP_SYS_CHROOT,
-1);
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
}
#endif /* _WIN32 */
static int
getWflagChars(int x)
{
int c = 0;
x -= 1;
while (x > 0) {
c += 1;
x /= 10;
}
return c;
}
static void
MakeFilename(char *buffer, char *orig_name, int cnt, int max_chars)
{
char *filename = malloc(PATH_MAX + 1);
if (filename == NULL)
error("Makefilename: malloc");
/* Process with strftime if Gflag is set. */
if (Gflag != 0) {
struct tm *local_tm;
/* Convert Gflag_time to a usable format */
if ((local_tm = localtime(&Gflag_time)) == NULL) {
error("MakeTimedFilename: localtime");
}
/* There's no good way to detect an error in strftime since a return
* value of 0 isn't necessarily failure.
*/
strftime(filename, PATH_MAX, orig_name, local_tm);
} else {
strncpy(filename, orig_name, PATH_MAX);
}
if (cnt == 0 && max_chars == 0)
strncpy(buffer, filename, PATH_MAX + 1);
else
if (snprintf(buffer, PATH_MAX + 1, "%s%0*d", filename, max_chars, cnt) > PATH_MAX)
/* Report an error if the filename is too large */
error("too many output files or filename is too long (> %d)", PATH_MAX);
free(filename);
}
static char *
get_next_file(FILE *VFile, char *ptr)
{
char *ret;
size_t len;
ret = fgets(ptr, PATH_MAX, VFile);
if (!ret)
return NULL;
len = strlen (ptr);
if (len > 0 && ptr[len - 1] == '\n')
ptr[len - 1] = '\0';
return ret;
}
#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
static int
tstamp_precision_from_string(const char *precision)
{
if (strncmp(precision, "nano", strlen("nano")) == 0)
return PCAP_TSTAMP_PRECISION_NANO;
if (strncmp(precision, "micro", strlen("micro")) == 0)
return PCAP_TSTAMP_PRECISION_MICRO;
return -EINVAL;
}
static const char *
tstamp_precision_to_string(int precision)
{
switch (precision) {
case PCAP_TSTAMP_PRECISION_MICRO:
return "micro";
case PCAP_TSTAMP_PRECISION_NANO:
return "nano";
default:
return "unknown";
}
}
#endif
#ifdef HAVE_CAPSICUM
/*
* Ensure that, on a dump file's descriptor, we have all the rights
* necessary to make the standard I/O library work with an fdopen()ed
* FILE * from that descriptor.
*
* A long time ago, in a galaxy far far away, AT&T decided that, instead
* of providing separate APIs for getting and setting the FD_ flags on a
* descriptor, getting and setting the O_ flags on a descriptor, and
* locking files, they'd throw them all into a kitchen-sink fcntl() call
* along the lines of ioctl(), the fact that ioctl() operations are
* largely specific to particular character devices but fcntl() operations
* are either generic to all descriptors or generic to all descriptors for
* regular files nonwithstanding.
*
* The Capsicum people decided that fine-grained control of descriptor
* operations was required, so that you need to grant permission for
* reading, writing, seeking, and fcntl-ing. The latter, courtesy of
* AT&T's decision, means that "fcntl-ing" isn't a thing, but a motley
* collection of things, so there are *individual* fcntls for which
* permission needs to be granted.
*
* The FreeBSD standard I/O people implemented some optimizations that
* requires that the standard I/O routines be able to determine whether
* the descriptor for the FILE * is open append-only or not; as that
* descriptor could have come from an open() rather than an fopen(),
* that requires that it be able to do an F_GETFL fcntl() to read
* the O_ flags.
*
* Tcpdump uses ftell() to determine how much data has been written
* to a file in order to, when used with -C, determine when it's time
* to rotate capture files. ftell() therefore needs to do an lseek()
* to find out the file offset and must, thanks to the aforementioned
* optimization, also know whether the descriptor is open append-only
* or not.
*
* The net result of all the above is that we need to grant CAP_SEEK,
* CAP_WRITE, and CAP_FCNTL with the CAP_FCNTL_GETFL subcapability.
*
* Perhaps this is the universe's way of saying that either
*
* 1) there needs to be an fopenat() call and a pcap_dump_openat() call
* using it, so that Capsicum-capable tcpdump wouldn't need to do
* an fdopen()
*
* or
*
* 2) there needs to be a cap_fdopen() call in the FreeBSD standard
* I/O library that knows what rights are needed by the standard
* I/O library, based on the open mode, and assigns them, perhaps
* with an additional argument indicating, for example, whether
* seeking should be allowed, so that tcpdump doesn't need to know
* what the standard I/O library happens to require this week.
*/
static void
set_dumper_capsicum_rights(pcap_dumper_t *p)
{
int fd = fileno(pcap_dump_file(p));
cap_rights_t rights;
cap_rights_init(&rights, CAP_SEEK, CAP_WRITE, CAP_FCNTL);
if (cap_rights_limit(fd, &rights) < 0 && errno != ENOSYS) {
error("unable to limit dump descriptor");
}
if (cap_fcntls_limit(fd, CAP_FCNTL_GETFL) < 0 && errno != ENOSYS) {
error("unable to limit dump descriptor fcntls");
}
}
#endif
/*
* Copy arg vector into a new buffer, concatenating arguments with spaces.
*/
static char *
copy_argv(register char **argv)
{
register char **p;
register u_int len = 0;
char *buf;
char *src, *dst;
p = argv;
if (*p == NULL)
return 0;
while (*p)
len += strlen(*p++) + 1;
buf = (char *)malloc(len);
if (buf == NULL)
error("copy_argv: malloc");
p = argv;
dst = buf;
while ((src = *p++) != NULL) {
while ((*dst++ = *src++) != '\0')
;
dst[-1] = ' ';
}
dst[-1] = '\0';
return buf;
}
/*
* On Windows, we need to open the file in binary mode, so that
* we get all the bytes specified by the size we get from "fstat()".
* On UNIX, that's not necessary. O_BINARY is defined on Windows;
* we define it as 0 if it's not defined, so it does nothing.
*/
#ifndef O_BINARY
#define O_BINARY 0
#endif
static char *
read_infile(char *fname)
{
register int i, fd, cc;
register char *cp;
struct stat buf;
fd = open(fname, O_RDONLY|O_BINARY);
if (fd < 0)
error("can't open %s: %s", fname, pcap_strerror(errno));
if (fstat(fd, &buf) < 0)
error("can't stat %s: %s", fname, pcap_strerror(errno));
cp = malloc((u_int)buf.st_size + 1);
if (cp == NULL)
error("malloc(%d) for %s: %s", (u_int)buf.st_size + 1,
fname, pcap_strerror(errno));
cc = read(fd, cp, (u_int)buf.st_size);
if (cc < 0)
error("read %s: %s", fname, pcap_strerror(errno));
if (cc != buf.st_size)
error("short read %s (%d != %d)", fname, cc, (int)buf.st_size);
close(fd);
/* replace "# comment" with spaces */
for (i = 0; i < cc; i++) {
if (cp[i] == '#')
while (i < cc && cp[i] != '\n')
cp[i++] = ' ';
}
cp[cc] = '\0';
return (cp);
}
#ifdef HAVE_PCAP_FINDALLDEVS
static long
parse_interface_number(const char *device)
{
long devnum;
char *end;
devnum = strtol(device, &end, 10);
if (device != end && *end == '\0') {
/*
* It's all-numeric, but is it a valid number?
*/
if (devnum <= 0) {
/*
* No, it's not an ordinal.
*/
error("Invalid adapter index");
}
return (devnum);
} else {
/*
* It's not all-numeric; return -1, so our caller
* knows that.
*/
return (-1);
}
}
static char *
find_interface_by_number(long devnum)
{
pcap_if_t *dev, *devlist;
long i;
char ebuf[PCAP_ERRBUF_SIZE];
char *device;
if (pcap_findalldevs(&devlist, ebuf) < 0)
error("%s", ebuf);
/*
* Look for the devnum-th entry in the list of devices (1-based).
*/
for (i = 0, dev = devlist; i < devnum-1 && dev != NULL;
i++, dev = dev->next)
;
if (dev == NULL)
error("Invalid adapter index");
device = strdup(dev->name);
pcap_freealldevs(devlist);
return (device);
}
#endif
static pcap_t *
open_interface(const char *device, netdissect_options *ndo, char *ebuf)
{
pcap_t *pc;
#ifdef HAVE_PCAP_CREATE
int status;
char *cp;
#endif
#ifdef HAVE_PCAP_CREATE
pc = pcap_create(device, ebuf);
if (pc == NULL) {
/*
* If this failed with "No such device", that means
* the interface doesn't exist; return NULL, so that
* the caller can see whether the device name is
* actually an interface index.
*/
if (strstr(ebuf, "No such device") != NULL)
return (NULL);
error("%s", ebuf);
}
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
if (Jflag)
show_tstamp_types_and_exit(pc, device);
#endif
#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
status = pcap_set_tstamp_precision(pc, ndo->ndo_tstamp_precision);
if (status != 0)
error("%s: Can't set %ssecond time stamp precision: %s",
device,
tstamp_precision_to_string(ndo->ndo_tstamp_precision),
pcap_statustostr(status));
#endif
#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
if (immediate_mode) {
status = pcap_set_immediate_mode(pc, 1);
if (status != 0)
error("%s: Can't set immediate mode: %s",
device,
pcap_statustostr(status));
}
#endif
/*
* Is this an interface that supports monitor mode?
*/
if (pcap_can_set_rfmon(pc) == 1)
supports_monitor_mode = 1;
else
supports_monitor_mode = 0;
status = pcap_set_snaplen(pc, ndo->ndo_snaplen);
if (status != 0)
error("%s: Can't set snapshot length: %s",
device, pcap_statustostr(status));
status = pcap_set_promisc(pc, !pflag);
if (status != 0)
error("%s: Can't set promiscuous mode: %s",
device, pcap_statustostr(status));
if (Iflag) {
status = pcap_set_rfmon(pc, 1);
if (status != 0)
error("%s: Can't set monitor mode: %s",
device, pcap_statustostr(status));
}
status = pcap_set_timeout(pc, 1000);
if (status != 0)
error("%s: pcap_set_timeout failed: %s",
device, pcap_statustostr(status));
if (Bflag != 0) {
status = pcap_set_buffer_size(pc, Bflag);
if (status != 0)
error("%s: Can't set buffer size: %s",
device, pcap_statustostr(status));
}
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
if (jflag != -1) {
status = pcap_set_tstamp_type(pc, jflag);
if (status < 0)
error("%s: Can't set time stamp type: %s",
device, pcap_statustostr(status));
else if (status > 0)
warning("When trying to set timestamp type '%s' on %s: %s",
pcap_tstamp_type_val_to_name(jflag), device,
pcap_statustostr(status));
}
#endif
status = pcap_activate(pc);
if (status < 0) {
/*
* pcap_activate() failed.
*/
cp = pcap_geterr(pc);
if (status == PCAP_ERROR)
error("%s", cp);
else if (status == PCAP_ERROR_NO_SUCH_DEVICE) {
/*
* Return an error for our caller to handle.
*/
snprintf(ebuf, PCAP_ERRBUF_SIZE, "%s: %s\n(%s)",
device, pcap_statustostr(status), cp);
pcap_close(pc);
return (NULL);
} else if (status == PCAP_ERROR_PERM_DENIED && *cp != '\0')
error("%s: %s\n(%s)", device,
pcap_statustostr(status), cp);
else
error("%s: %s", device,
pcap_statustostr(status));
} else if (status > 0) {
/*
* pcap_activate() succeeded, but it's warning us
* of a problem it had.
*/
cp = pcap_geterr(pc);
if (status == PCAP_WARNING)
warning("%s", cp);
else if (status == PCAP_WARNING_PROMISC_NOTSUP &&
*cp != '\0')
warning("%s: %s\n(%s)", device,
pcap_statustostr(status), cp);
else
warning("%s: %s", device,
pcap_statustostr(status));
}
#ifdef HAVE_PCAP_SETDIRECTION
if (Qflag != -1) {
status = pcap_setdirection(pc, Qflag);
if (status != 0)
error("%s: pcap_setdirection() failed: %s",
device, pcap_geterr(pc));
}
#endif /* HAVE_PCAP_SETDIRECTION */
#else /* HAVE_PCAP_CREATE */
*ebuf = '\0';
pc = pcap_open_live(device, ndo->ndo_snaplen, !pflag, 1000, ebuf);
if (pc == NULL) {
/*
* If this failed with "No such device", that means
* the interface doesn't exist; return NULL, so that
* the caller can see whether the device name is
* actually an interface index.
*/
if (strstr(ebuf, "No such device") != NULL)
return (NULL);
error("%s", ebuf);
}
if (*ebuf)
warning("%s", ebuf);
#endif /* HAVE_PCAP_CREATE */
return (pc);
}
int
main(int argc, char **argv)
{
register int cnt, op, i;
bpf_u_int32 localnet =0 , netmask = 0;
int timezone_offset = 0;
register char *cp, *infile, *cmdbuf, *device, *RFileName, *VFileName, *WFileName;
pcap_handler callback;
int dlt;
const char *dlt_name;
struct bpf_program fcode;
#ifndef _WIN32
RETSIGTYPE (*oldhandler)(int);
#endif
struct dump_info dumpinfo;
u_char *pcap_userdata;
char ebuf[PCAP_ERRBUF_SIZE];
char VFileLine[PATH_MAX + 1];
char *username = NULL;
char *chroot_dir = NULL;
char *ret = NULL;
char *end;
#ifdef HAVE_PCAP_FINDALLDEVS
pcap_if_t *devlist;
long devnum;
#endif
int status;
FILE *VFile;
#ifdef HAVE_CAPSICUM
cap_rights_t rights;
int cansandbox;
#endif /* HAVE_CAPSICUM */
int Oflag = 1; /* run filter code optimizer */
int yflag_dlt = -1;
const char *yflag_dlt_name = NULL;
netdissect_options Ndo;
netdissect_options *ndo = &Ndo;
/*
* Initialize the netdissect code.
*/
if (nd_init(ebuf, sizeof ebuf) == -1)
error("%s", ebuf);
memset(ndo, 0, sizeof(*ndo));
ndo_set_function_pointers(ndo);
ndo->ndo_snaplen = DEFAULT_SNAPLEN;
cnt = -1;
device = NULL;
infile = NULL;
RFileName = NULL;
VFileName = NULL;
VFile = NULL;
WFileName = NULL;
dlt = -1;
if ((cp = strrchr(argv[0], '/')) != NULL)
ndo->program_name = program_name = cp + 1;
else
ndo->program_name = program_name = argv[0];
#ifdef _WIN32
if (pcap_wsockinit() != 0)
error("Attempting to initialize Winsock failed");
#endif /* _WIN32 */
/*
* On platforms where the CPU doesn't support unaligned loads,
* force unaligned accesses to abort with SIGBUS, rather than
* being fixed up (slowly) by the OS kernel; on those platforms,
* misaligned accesses are bugs, and we want tcpdump to crash so
* that the bugs are reported.
*/
if (abort_on_misalignment(ebuf, sizeof(ebuf)) < 0)
error("%s", ebuf);
while (
(op = getopt_long(argc, argv, SHORTOPTS, longopts, NULL)) != -1)
switch (op) {
case 'a':
/* compatibility for old -a */
break;
case 'A':
++ndo->ndo_Aflag;
break;
case 'b':
++ndo->ndo_bflag;
break;
#if defined(HAVE_PCAP_CREATE) || defined(_WIN32)
case 'B':
Bflag = atoi(optarg)*1024;
if (Bflag <= 0)
error("invalid packet buffer size %s", optarg);
break;
#endif /* defined(HAVE_PCAP_CREATE) || defined(_WIN32) */
case 'c':
cnt = atoi(optarg);
if (cnt <= 0)
error("invalid packet count %s", optarg);
break;
case 'C':
Cflag = atoi(optarg) * 1000000;
if (Cflag <= 0)
error("invalid file size %s", optarg);
break;
case 'd':
++dflag;
break;
case 'D':
Dflag++;
break;
case 'L':
Lflag++;
break;
case 'e':
++ndo->ndo_eflag;
break;
case 'E':
#ifndef HAVE_LIBCRYPTO
warning("crypto code not compiled in");
#endif
ndo->ndo_espsecret = optarg;
break;
case 'f':
++ndo->ndo_fflag;
break;
case 'F':
infile = optarg;
break;
case 'G':
Gflag = atoi(optarg);
if (Gflag < 0)
error("invalid number of seconds %s", optarg);
/* We will create one file initially. */
Gflag_count = 0;
/* Grab the current time for rotation use. */
if ((Gflag_time = time(NULL)) == (time_t)-1) {
error("main: can't get current time: %s",
pcap_strerror(errno));
}
break;
case 'h':
print_usage();
exit_tcpdump(0);
break;
case 'H':
++ndo->ndo_Hflag;
break;
case 'i':
device = optarg;
break;
#ifdef HAVE_PCAP_CREATE
case 'I':
++Iflag;
break;
#endif /* HAVE_PCAP_CREATE */
#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
case 'j':
jflag = pcap_tstamp_type_name_to_val(optarg);
if (jflag < 0)
error("invalid time stamp type %s", optarg);
break;
case 'J':
Jflag++;
break;
#endif
case 'l':
#ifdef _WIN32
/*
* _IOLBF is the same as _IOFBF in Microsoft's C
* libraries; the only alternative they offer
* is _IONBF.
*
* XXX - this should really be checking for MSVC++,
* not _WIN32, if, for example, MinGW has its own
* C library that is more UNIX-compatible.
*/
setvbuf(stdout, NULL, _IONBF, 0);
#else /* _WIN32 */
#ifdef HAVE_SETLINEBUF
setlinebuf(stdout);
#else
setvbuf(stdout, NULL, _IOLBF, 0);
#endif
#endif /* _WIN32 */
break;
case 'K':
++ndo->ndo_Kflag;
break;
case 'm':
if (nd_have_smi_support()) {
if (nd_load_smi_module(optarg, ebuf, sizeof ebuf) == -1)
error("%s", ebuf);
} else {
(void)fprintf(stderr, "%s: ignoring option `-m %s' ",
program_name, optarg);
(void)fprintf(stderr, "(no libsmi support)\n");
}
break;
case 'M':
/* TCP-MD5 shared secret */
#ifndef HAVE_LIBCRYPTO
warning("crypto code not compiled in");
#endif
ndo->ndo_sigsecret = optarg;
break;
case 'n':
++ndo->ndo_nflag;
break;
case 'N':
++ndo->ndo_Nflag;
break;
case 'O':
Oflag = 0;
break;
case 'p':
++pflag;
break;
case 'q':
++ndo->ndo_qflag;
++ndo->ndo_suppress_default_print;
break;
#ifdef HAVE_PCAP_SETDIRECTION
case 'Q':
if (ascii_strcasecmp(optarg, "in") == 0)
Qflag = PCAP_D_IN;
else if (ascii_strcasecmp(optarg, "out") == 0)
Qflag = PCAP_D_OUT;
else if (ascii_strcasecmp(optarg, "inout") == 0)
Qflag = PCAP_D_INOUT;
else
error("unknown capture direction `%s'", optarg);
break;
#endif /* HAVE_PCAP_SETDIRECTION */
case 'r':
RFileName = optarg;
break;
case 's':
ndo->ndo_snaplen = strtol(optarg, &end, 0);
if (optarg == end || *end != '\0'
|| ndo->ndo_snaplen < 0 || ndo->ndo_snaplen > MAXIMUM_SNAPLEN)
error("invalid snaplen %s", optarg);
else if (ndo->ndo_snaplen == 0)
ndo->ndo_snaplen = MAXIMUM_SNAPLEN;
break;
case 'S':
++ndo->ndo_Sflag;
break;
case 't':
++ndo->ndo_tflag;
break;
case 'T':
if (ascii_strcasecmp(optarg, "vat") == 0)
ndo->ndo_packettype = PT_VAT;
else if (ascii_strcasecmp(optarg, "wb") == 0)
ndo->ndo_packettype = PT_WB;
else if (ascii_strcasecmp(optarg, "rpc") == 0)
ndo->ndo_packettype = PT_RPC;
else if (ascii_strcasecmp(optarg, "rtp") == 0)
ndo->ndo_packettype = PT_RTP;
else if (ascii_strcasecmp(optarg, "rtcp") == 0)
ndo->ndo_packettype = PT_RTCP;
else if (ascii_strcasecmp(optarg, "snmp") == 0)
ndo->ndo_packettype = PT_SNMP;
else if (ascii_strcasecmp(optarg, "cnfp") == 0)
ndo->ndo_packettype = PT_CNFP;
else if (ascii_strcasecmp(optarg, "tftp") == 0)
ndo->ndo_packettype = PT_TFTP;
else if (ascii_strcasecmp(optarg, "aodv") == 0)
ndo->ndo_packettype = PT_AODV;
else if (ascii_strcasecmp(optarg, "carp") == 0)
ndo->ndo_packettype = PT_CARP;
else if (ascii_strcasecmp(optarg, "radius") == 0)
ndo->ndo_packettype = PT_RADIUS;
else if (ascii_strcasecmp(optarg, "zmtp1") == 0)
ndo->ndo_packettype = PT_ZMTP1;
else if (ascii_strcasecmp(optarg, "vxlan") == 0)
ndo->ndo_packettype = PT_VXLAN;
else if (ascii_strcasecmp(optarg, "pgm") == 0)
ndo->ndo_packettype = PT_PGM;
else if (ascii_strcasecmp(optarg, "pgm_zmtp1") == 0)
ndo->ndo_packettype = PT_PGM_ZMTP1;
else if (ascii_strcasecmp(optarg, "lmp") == 0)
ndo->ndo_packettype = PT_LMP;
else if (ascii_strcasecmp(optarg, "resp") == 0)
ndo->ndo_packettype = PT_RESP;
else
error("unknown packet type `%s'", optarg);
break;
case 'u':
++ndo->ndo_uflag;
break;
#ifdef HAVE_PCAP_DUMP_FLUSH
case 'U':
++Uflag;
break;
#endif
case 'v':
++ndo->ndo_vflag;
break;
case 'V':
VFileName = optarg;
break;
case 'w':
WFileName = optarg;
break;
case 'W':
Wflag = atoi(optarg);
if (Wflag <= 0)
error("invalid number of output files %s", optarg);
WflagChars = getWflagChars(Wflag);
break;
case 'x':
++ndo->ndo_xflag;
++ndo->ndo_suppress_default_print;
break;
case 'X':
++ndo->ndo_Xflag;
++ndo->ndo_suppress_default_print;
break;
case 'y':
yflag_dlt_name = optarg;
yflag_dlt =
pcap_datalink_name_to_val(yflag_dlt_name);
if (yflag_dlt < 0)
error("invalid data link type %s", yflag_dlt_name);
break;
#ifdef HAVE_PCAP_SET_PARSER_DEBUG
case 'Y':
{
/* Undocumented flag */
pcap_set_parser_debug(1);
}
break;
#endif
case 'z':
zflag = optarg;
break;
case 'Z':
username = optarg;
break;
case '#':
ndo->ndo_packet_number = 1;
break;
case OPTION_VERSION:
print_version();
exit_tcpdump(0);
break;
#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
case OPTION_TSTAMP_PRECISION:
ndo->ndo_tstamp_precision = tstamp_precision_from_string(optarg);
if (ndo->ndo_tstamp_precision < 0)
error("unsupported time stamp precision");
break;
#endif
#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
case OPTION_IMMEDIATE_MODE:
immediate_mode = 1;
break;
#endif
default:
print_usage();
exit_tcpdump(1);
/* NOTREACHED */
}
#ifdef HAVE_PCAP_FINDALLDEVS
if (Dflag)
show_devices_and_exit();
#endif
switch (ndo->ndo_tflag) {
case 0: /* Default */
case 4: /* Default + Date*/
timezone_offset = gmt2local(0);
break;
case 1: /* No time stamp */
case 2: /* Unix timeval style */
case 3: /* Microseconds since previous packet */
case 5: /* Microseconds since first packet */
break;
default: /* Not supported */
error("only -t, -tt, -ttt, -tttt and -ttttt are supported");
break;
}
if (ndo->ndo_fflag != 0 && (VFileName != NULL || RFileName != NULL))
error("-f can not be used with -V or -r");
if (VFileName != NULL && RFileName != NULL)
error("-V and -r are mutually exclusive.");
#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
/*
* If we're printing dissected packets to the standard output
* rather than saving raw packets to a file, and the standard
* output is a terminal, use immediate mode, as the user's
* probably expecting to see packets pop up immediately.
*/
if (WFileName == NULL && isatty(1))
immediate_mode = 1;
#endif
#ifdef WITH_CHROOT
/* if run as root, prepare for chrooting */
if (getuid() == 0 || geteuid() == 0) {
/* future extensibility for cmd-line arguments */
if (!chroot_dir)
chroot_dir = WITH_CHROOT;
}
#endif
#ifdef WITH_USER
/* if run as root, prepare for dropping root privileges */
if (getuid() == 0 || geteuid() == 0) {
/* Run with '-Z root' to restore old behaviour */
if (!username)
username = WITH_USER;
}
#endif
if (RFileName != NULL || VFileName != NULL) {
/*
* If RFileName is non-null, it's the pathname of a
* savefile to read. If VFileName is non-null, it's
* the pathname of a file containing a list of pathnames
* (one per line) of savefiles to read.
*
* In either case, we're reading a savefile, not doing
* a live capture.
*/
#ifndef _WIN32
/*
* We don't need network access, so relinquish any set-UID
* or set-GID privileges we have (if any).
*
* We do *not* want set-UID privileges when opening a
* trace file, as that might let the user read other
* people's trace files (especially if we're set-UID
* root).
*/
if (setgid(getgid()) != 0 || setuid(getuid()) != 0 )
fprintf(stderr, "Warning: setgid/setuid failed !\n");
#endif /* _WIN32 */
if (VFileName != NULL) {
if (VFileName[0] == '-' && VFileName[1] == '\0')
VFile = stdin;
else
VFile = fopen(VFileName, "r");
if (VFile == NULL)
error("Unable to open file: %s\n", pcap_strerror(errno));
ret = get_next_file(VFile, VFileLine);
if (!ret)
error("Nothing in %s\n", VFileName);
RFileName = VFileLine;
}
#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
pd = pcap_open_offline_with_tstamp_precision(RFileName,
ndo->ndo_tstamp_precision, ebuf);
#else
pd = pcap_open_offline(RFileName, ebuf);
#endif
if (pd == NULL)
error("%s", ebuf);
#ifdef HAVE_CAPSICUM
cap_rights_init(&rights, CAP_READ);
if (cap_rights_limit(fileno(pcap_file(pd)), &rights) < 0 &&
errno != ENOSYS) {
error("unable to limit pcap descriptor");
}
#endif
dlt = pcap_datalink(pd);
dlt_name = pcap_datalink_val_to_name(dlt);
if (dlt_name == NULL) {
fprintf(stderr, "reading from file %s, link-type %u\n",
RFileName, dlt);
} else {
fprintf(stderr,
"reading from file %s, link-type %s (%s)\n",
RFileName, dlt_name,
pcap_datalink_val_to_description(dlt));
}
} else {
/*
* We're doing a live capture.
*/
if (device == NULL) {
/*
* No interface was specified. Pick one.
*/
#ifdef HAVE_PCAP_FINDALLDEVS
/*
* Find the list of interfaces, and pick
* the first interface.
*/
if (pcap_findalldevs(&devlist, ebuf) >= 0 &&
devlist != NULL) {
device = strdup(devlist->name);
pcap_freealldevs(devlist);
}
#else /* HAVE_PCAP_FINDALLDEVS */
/*
* Use whatever interface pcap_lookupdev()
* chooses.
*/
device = pcap_lookupdev(ebuf);
#endif
if (device == NULL)
error("%s", ebuf);
}
/*
* Try to open the interface with the specified name.
*/
pd = open_interface(device, ndo, ebuf);
if (pd == NULL) {
/*
* That failed. If we can get a list of
* interfaces, and the interface name
* is purely numeric, try to use it as
* a 1-based index in the list of
* interfaces.
*/
#ifdef HAVE_PCAP_FINDALLDEVS
devnum = parse_interface_number(device);
if (devnum == -1) {
/*
* It's not a number; just report
* the open error and fail.
*/
error("%s", ebuf);
}
/*
* OK, it's a number; try to find the
* interface with that index, and try
* to open it.
*
* find_interface_by_number() exits if it
* couldn't be found.
*/
device = find_interface_by_number(devnum);
pd = open_interface(device, ndo, ebuf);
if (pd == NULL)
error("%s", ebuf);
#else /* HAVE_PCAP_FINDALLDEVS */
/*
* We can't get a list of interfaces; just
* fail.
*/
error("%s", ebuf);
#endif /* HAVE_PCAP_FINDALLDEVS */
}
/*
* Let user own process after socket has been opened.
*/
#ifndef _WIN32
if (setgid(getgid()) != 0 || setuid(getuid()) != 0)
fprintf(stderr, "Warning: setgid/setuid failed !\n");
#endif /* _WIN32 */
#if !defined(HAVE_PCAP_CREATE) && defined(_WIN32)
if(Bflag != 0)
if(pcap_setbuff(pd, Bflag)==-1){
error("%s", pcap_geterr(pd));
}
#endif /* !defined(HAVE_PCAP_CREATE) && defined(_WIN32) */
if (Lflag)
show_dlts_and_exit(pd, device);
if (yflag_dlt >= 0) {
#ifdef HAVE_PCAP_SET_DATALINK
if (pcap_set_datalink(pd, yflag_dlt) < 0)
error("%s", pcap_geterr(pd));
#else
/*
* We don't actually support changing the
* data link type, so we only let them
* set it to what it already is.
*/
if (yflag_dlt != pcap_datalink(pd)) {
error("%s is not one of the DLTs supported by this device\n",
yflag_dlt_name);
}
#endif
(void)fprintf(stderr, "%s: data link type %s\n",
program_name, yflag_dlt_name);
(void)fflush(stderr);
}
i = pcap_snapshot(pd);
if (ndo->ndo_snaplen < i) {
warning("snaplen raised from %d to %d", ndo->ndo_snaplen, i);
ndo->ndo_snaplen = i;
}
if(ndo->ndo_fflag != 0) {
if (pcap_lookupnet(device, &localnet, &netmask, ebuf) < 0) {
warning("foreign (-f) flag used but: %s", ebuf);
}
}
}
if (infile)
cmdbuf = read_infile(infile);
else
cmdbuf = copy_argv(&argv[optind]);
#ifdef HAVE_PCAP_SET_OPTIMIZER_DEBUG
pcap_set_optimizer_debug(dflag);
#endif
if (pcap_compile(pd, &fcode, cmdbuf, Oflag, netmask) < 0)
error("%s", pcap_geterr(pd));
if (dflag) {
bpf_dump(&fcode, dflag);
pcap_close(pd);
free(cmdbuf);
pcap_freecode(&fcode);
exit_tcpdump(0);
}
init_print(ndo, localnet, netmask, timezone_offset);
#ifndef _WIN32
(void)setsignal(SIGPIPE, cleanup);
(void)setsignal(SIGTERM, cleanup);
(void)setsignal(SIGINT, cleanup);
#endif /* _WIN32 */
#if defined(HAVE_FORK) || defined(HAVE_VFORK)
(void)setsignal(SIGCHLD, child_cleanup);
#endif
/* Cooperate with nohup(1) */
#ifndef _WIN32
if ((oldhandler = setsignal(SIGHUP, cleanup)) != SIG_DFL)
(void)setsignal(SIGHUP, oldhandler);
#endif /* _WIN32 */
#ifndef _WIN32
/*
* If a user name was specified with "-Z", attempt to switch to
* that user's UID. This would probably be used with sudo,
* to allow tcpdump to be run in a special restricted
* account (if you just want to allow users to open capture
* devices, and can't just give users that permission,
* you'd make tcpdump set-UID or set-GID).
*
* Tcpdump doesn't necessarily write only to one savefile;
* the general only way to allow a -Z instance to write to
* savefiles as the user under whose UID it's run, rather
* than as the user specified with -Z, would thus be to switch
* to the original user ID before opening a capture file and
* then switch back to the -Z user ID after opening the savefile.
* Switching to the -Z user ID only after opening the first
* savefile doesn't handle the general case.
*/
if (getuid() == 0 || geteuid() == 0) {
#ifdef HAVE_LIBCAP_NG
/* Initialize capng */
capng_clear(CAPNG_SELECT_BOTH);
if (username) {
capng_updatev(
CAPNG_ADD,
CAPNG_PERMITTED | CAPNG_EFFECTIVE,
CAP_SETUID,
CAP_SETGID,
-1);
}
if (chroot_dir) {
capng_update(
CAPNG_ADD,
CAPNG_PERMITTED | CAPNG_EFFECTIVE,
CAP_SYS_CHROOT
);
}
if (WFileName) {
capng_update(
CAPNG_ADD,
CAPNG_PERMITTED | CAPNG_EFFECTIVE,
CAP_DAC_OVERRIDE
);
}
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
if (username || chroot_dir)
droproot(username, chroot_dir);
}
#endif /* _WIN32 */
if (pcap_setfilter(pd, &fcode) < 0)
error("%s", pcap_geterr(pd));
#ifdef HAVE_CAPSICUM
if (RFileName == NULL && VFileName == NULL) {
static const unsigned long cmds[] = { BIOCGSTATS, BIOCROTZBUF };
/*
* The various libpcap devices use a combination of
* read (bpf), ioctl (bpf, netmap), poll (netmap)
* so we add the relevant access rights.
*/
cap_rights_init(&rights, CAP_IOCTL, CAP_READ, CAP_EVENT);
if (cap_rights_limit(pcap_fileno(pd), &rights) < 0 &&
errno != ENOSYS) {
error("unable to limit pcap descriptor");
}
if (cap_ioctls_limit(pcap_fileno(pd), cmds,
sizeof(cmds) / sizeof(cmds[0])) < 0 && errno != ENOSYS) {
error("unable to limit ioctls on pcap descriptor");
}
}
#endif
if (WFileName) {
pcap_dumper_t *p;
/* Do not exceed the default PATH_MAX for files. */
dumpinfo.CurrentFileName = (char *)malloc(PATH_MAX + 1);
if (dumpinfo.CurrentFileName == NULL)
error("malloc of dumpinfo.CurrentFileName");
/* We do not need numbering for dumpfiles if Cflag isn't set. */
if (Cflag != 0)
MakeFilename(dumpinfo.CurrentFileName, WFileName, 0, WflagChars);
else
MakeFilename(dumpinfo.CurrentFileName, WFileName, 0, 0);
p = pcap_dump_open(pd, dumpinfo.CurrentFileName);
#ifdef HAVE_LIBCAP_NG
/* Give up CAP_DAC_OVERRIDE capability.
* Only allow it to be restored if the -C or -G flag have been
* set since we may need to create more files later on.
*/
capng_update(
CAPNG_DROP,
(Cflag || Gflag ? 0 : CAPNG_PERMITTED)
| CAPNG_EFFECTIVE,
CAP_DAC_OVERRIDE
);
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
if (p == NULL)
error("%s", pcap_geterr(pd));
#ifdef HAVE_CAPSICUM
set_dumper_capsicum_rights(p);
#endif
if (Cflag != 0 || Gflag != 0) {
#ifdef HAVE_CAPSICUM
dumpinfo.WFileName = strdup(basename(WFileName));
if (dumpinfo.WFileName == NULL) {
error("Unable to allocate memory for file %s",
WFileName);
}
dumpinfo.dirfd = open(dirname(WFileName),
O_DIRECTORY | O_RDONLY);
if (dumpinfo.dirfd < 0) {
error("unable to open directory %s",
dirname(WFileName));
}
cap_rights_init(&rights, CAP_CREATE, CAP_FCNTL,
CAP_FTRUNCATE, CAP_LOOKUP, CAP_SEEK, CAP_WRITE);
if (cap_rights_limit(dumpinfo.dirfd, &rights) < 0 &&
errno != ENOSYS) {
error("unable to limit directory rights");
}
if (cap_fcntls_limit(dumpinfo.dirfd, CAP_FCNTL_GETFL) < 0 &&
errno != ENOSYS) {
error("unable to limit dump descriptor fcntls");
}
#else /* !HAVE_CAPSICUM */
dumpinfo.WFileName = WFileName;
#endif
callback = dump_packet_and_trunc;
dumpinfo.pd = pd;
dumpinfo.p = p;
pcap_userdata = (u_char *)&dumpinfo;
} else {
callback = dump_packet;
pcap_userdata = (u_char *)p;
}
#ifdef HAVE_PCAP_DUMP_FLUSH
if (Uflag)
pcap_dump_flush(p);
#endif
} else {
dlt = pcap_datalink(pd);
ndo->ndo_if_printer = get_if_printer(ndo, dlt);
callback = print_packet;
pcap_userdata = (u_char *)ndo;
}
#ifdef SIGNAL_REQ_INFO
/*
* We can't get statistics when reading from a file rather
* than capturing from a device.
*/
if (RFileName == NULL)
(void)setsignal(SIGNAL_REQ_INFO, requestinfo);
#endif
if (ndo->ndo_vflag > 0 && WFileName) {
/*
* When capturing to a file, "-v" means tcpdump should,
* every 10 seconds, "v"erbosely report the number of
* packets captured.
*/
#ifdef USE_WIN32_MM_TIMER
/* call verbose_stats_dump() each 1000 +/-100msec */
timer_id = timeSetEvent(1000, 100, verbose_stats_dump, 0, TIME_PERIODIC);
setvbuf(stderr, NULL, _IONBF, 0);
#elif defined(HAVE_ALARM)
(void)setsignal(SIGALRM, verbose_stats_dump);
alarm(1);
#endif
}
if (RFileName == NULL) {
/*
* Live capture (if -V was specified, we set RFileName
* to a file from the -V file). Print a message to
* the standard error on UN*X.
*/
if (!ndo->ndo_vflag && !WFileName) {
(void)fprintf(stderr,
"%s: verbose output suppressed, use -v or -vv for full protocol decode\n",
program_name);
} else
(void)fprintf(stderr, "%s: ", program_name);
dlt = pcap_datalink(pd);
dlt_name = pcap_datalink_val_to_name(dlt);
if (dlt_name == NULL) {
(void)fprintf(stderr, "listening on %s, link-type %u, capture size %u bytes\n",
device, dlt, ndo->ndo_snaplen);
} else {
(void)fprintf(stderr, "listening on %s, link-type %s (%s), capture size %u bytes\n",
device, dlt_name,
pcap_datalink_val_to_description(dlt), ndo->ndo_snaplen);
}
(void)fflush(stderr);
}
#ifdef HAVE_CAPSICUM
cansandbox = (ndo->ndo_nflag && VFileName == NULL && zflag == NULL);
if (cansandbox && cap_enter() < 0 && errno != ENOSYS)
error("unable to enter the capability mode");
#endif /* HAVE_CAPSICUM */
do {
status = pcap_loop(pd, cnt, callback, pcap_userdata);
if (WFileName == NULL) {
/*
* We're printing packets. Flush the printed output,
* so it doesn't get intermingled with error output.
*/
if (status == -2) {
/*
* We got interrupted, so perhaps we didn't
* manage to finish a line we were printing.
* Print an extra newline, just in case.
*/
putchar('\n');
}
(void)fflush(stdout);
}
if (status == -2) {
/*
* We got interrupted. If we are reading multiple
* files (via -V) set these so that we stop.
*/
VFileName = NULL;
ret = NULL;
}
if (status == -1) {
/*
* Error. Report it.
*/
(void)fprintf(stderr, "%s: pcap_loop: %s\n",
program_name, pcap_geterr(pd));
}
if (RFileName == NULL) {
/*
* We're doing a live capture. Report the capture
* statistics.
*/
info(1);
}
pcap_close(pd);
if (VFileName != NULL) {
ret = get_next_file(VFile, VFileLine);
if (ret) {
int new_dlt;
RFileName = VFileLine;
pd = pcap_open_offline(RFileName, ebuf);
if (pd == NULL)
error("%s", ebuf);
#ifdef HAVE_CAPSICUM
cap_rights_init(&rights, CAP_READ);
if (cap_rights_limit(fileno(pcap_file(pd)),
&rights) < 0 && errno != ENOSYS) {
error("unable to limit pcap descriptor");
}
#endif
new_dlt = pcap_datalink(pd);
if (new_dlt != dlt) {
/*
* The new file has a different
* link-layer header type from the
* previous one.
*/
if (WFileName != NULL) {
/*
* We're writing raw packets
* that match the filter to
* a pcap file. pcap files
* don't support multiple
* different link-layer
* header types, so we fail
* here.
*/
error("%s: new dlt does not match original", RFileName);
}
/*
* We're printing the decoded packets;
* switch to the new DLT.
*
* To do that, we need to change
* the printer, change the DLT name,
* and recompile the filter with
* the new DLT.
*/
dlt = new_dlt;
ndo->ndo_if_printer = get_if_printer(ndo, dlt);
if (pcap_compile(pd, &fcode, cmdbuf, Oflag, netmask) < 0)
error("%s", pcap_geterr(pd));
}
/*
* Set the filter on the new file.
*/
if (pcap_setfilter(pd, &fcode) < 0)
error("%s", pcap_geterr(pd));
/*
* Report the new file.
*/
dlt_name = pcap_datalink_val_to_name(dlt);
if (dlt_name == NULL) {
fprintf(stderr, "reading from file %s, link-type %u\n",
RFileName, dlt);
} else {
fprintf(stderr,
"reading from file %s, link-type %s (%s)\n",
RFileName, dlt_name,
pcap_datalink_val_to_description(dlt));
}
}
}
}
while (ret != NULL);
free(cmdbuf);
pcap_freecode(&fcode);
exit_tcpdump(status == -1 ? 1 : 0);
}
/* make a clean exit on interrupts */
static RETSIGTYPE
cleanup(int signo _U_)
{
#ifdef USE_WIN32_MM_TIMER
if (timer_id)
timeKillEvent(timer_id);
timer_id = 0;
#elif defined(HAVE_ALARM)
alarm(0);
#endif
#ifdef HAVE_PCAP_BREAKLOOP
/*
* We have "pcap_breakloop()"; use it, so that we do as little
* as possible in the signal handler (it's probably not safe
* to do anything with standard I/O streams in a signal handler -
* the ANSI C standard doesn't say it is).
*/
pcap_breakloop(pd);
#else
/*
* We don't have "pcap_breakloop()"; this isn't safe, but
* it's the best we can do. Print the summary if we're
* not reading from a savefile - i.e., if we're doing a
* live capture - and exit.
*/
if (pd != NULL && pcap_file(pd) == NULL) {
/*
* We got interrupted, so perhaps we didn't
* manage to finish a line we were printing.
* Print an extra newline, just in case.
*/
putchar('\n');
(void)fflush(stdout);
info(1);
}
exit_tcpdump(0);
#endif
}
/*
On windows, we do not use a fork, so we do not care less about
waiting a child processes to die
*/
#if defined(HAVE_FORK) || defined(HAVE_VFORK)
static RETSIGTYPE
child_cleanup(int signo _U_)
{
wait(NULL);
}
#endif /* HAVE_FORK && HAVE_VFORK */
static void
info(register int verbose)
{
struct pcap_stat stats;
/*
* Older versions of libpcap didn't set ps_ifdrop on some
* platforms; initialize it to 0 to handle that.
*/
stats.ps_ifdrop = 0;
if (pcap_stats(pd, &stats) < 0) {
(void)fprintf(stderr, "pcap_stats: %s\n", pcap_geterr(pd));
infoprint = 0;
return;
}
if (!verbose)
fprintf(stderr, "%s: ", program_name);
(void)fprintf(stderr, "%u packet%s captured", packets_captured,
PLURAL_SUFFIX(packets_captured));
if (!verbose)
fputs(", ", stderr);
else
putc('\n', stderr);
(void)fprintf(stderr, "%u packet%s received by filter", stats.ps_recv,
PLURAL_SUFFIX(stats.ps_recv));
if (!verbose)
fputs(", ", stderr);
else
putc('\n', stderr);
(void)fprintf(stderr, "%u packet%s dropped by kernel", stats.ps_drop,
PLURAL_SUFFIX(stats.ps_drop));
if (stats.ps_ifdrop != 0) {
if (!verbose)
fputs(", ", stderr);
else
putc('\n', stderr);
(void)fprintf(stderr, "%u packet%s dropped by interface\n",
stats.ps_ifdrop, PLURAL_SUFFIX(stats.ps_ifdrop));
} else
putc('\n', stderr);
infoprint = 0;
}
#if defined(HAVE_FORK) || defined(HAVE_VFORK)
#ifdef HAVE_FORK
#define fork_subprocess() fork()
#else
#define fork_subprocess() vfork()
#endif
static void
compress_savefile(const char *filename)
{
pid_t child;
child = fork_subprocess();
if (child == -1) {
fprintf(stderr,
"compress_savefile: fork failed: %s\n",
pcap_strerror(errno));
return;
}
if (child != 0) {
/* Parent process. */
return;
}
/*
* Child process.
* Set to lowest priority so that this doesn't disturb the capture.
*/
#ifdef NZERO
setpriority(PRIO_PROCESS, 0, NZERO - 1);
#else
setpriority(PRIO_PROCESS, 0, 19);
#endif
if (execlp(zflag, zflag, filename, (char *)NULL) == -1)
fprintf(stderr,
"compress_savefile: execlp(%s, %s) failed: %s\n",
zflag,
filename,
pcap_strerror(errno));
#ifdef HAVE_FORK
exit(1);
#else
_exit(1);
#endif
}
#else /* HAVE_FORK && HAVE_VFORK */
static void
compress_savefile(const char *filename)
{
fprintf(stderr,
"compress_savefile failed. Functionality not implemented under your system\n");
}
#endif /* HAVE_FORK && HAVE_VFORK */
static void
dump_packet_and_trunc(u_char *user, const struct pcap_pkthdr *h, const u_char *sp)
{
struct dump_info *dump_info;
++packets_captured;
++infodelay;
dump_info = (struct dump_info *)user;
/*
* XXX - this won't force the file to rotate on the specified time
* boundary, but it will rotate on the first packet received after the
* specified Gflag number of seconds. Note: if a Gflag time boundary
* and a Cflag size boundary coincide, the time rotation will occur
* first thereby cancelling the Cflag boundary (since the file should
* be 0).
*/
if (Gflag != 0) {
/* Check if it is time to rotate */
time_t t;
/* Get the current time */
if ((t = time(NULL)) == (time_t)-1) {
error("dump_and_trunc_packet: can't get current_time: %s",
pcap_strerror(errno));
}
/* If the time is greater than the specified window, rotate */
if (t - Gflag_time >= Gflag) {
#ifdef HAVE_CAPSICUM
FILE *fp;
int fd;
#endif
/* Update the Gflag_time */
Gflag_time = t;
/* Update Gflag_count */
Gflag_count++;
/*
* Close the current file and open a new one.
*/
pcap_dump_close(dump_info->p);
/*
* Compress the file we just closed, if the user asked for it
*/
if (zflag != NULL)
compress_savefile(dump_info->CurrentFileName);
/*
* Check to see if we've exceeded the Wflag (when
* not using Cflag).
*/
if (Cflag == 0 && Wflag > 0 && Gflag_count >= Wflag) {
(void)fprintf(stderr, "Maximum file limit reached: %d\n",
Wflag);
info(1);
exit_tcpdump(0);
/* NOTREACHED */
}
if (dump_info->CurrentFileName != NULL)
free(dump_info->CurrentFileName);
/* Allocate space for max filename + \0. */
dump_info->CurrentFileName = (char *)malloc(PATH_MAX + 1);
if (dump_info->CurrentFileName == NULL)
error("dump_packet_and_trunc: malloc");
/*
* Gflag was set otherwise we wouldn't be here. Reset the count
* so multiple files would end with 1,2,3 in the filename.
* The counting is handled with the -C flow after this.
*/
Cflag_count = 0;
/*
* This is always the first file in the Cflag
* rotation: e.g. 0
* We also don't need numbering if Cflag is not set.
*/
if (Cflag != 0)
MakeFilename(dump_info->CurrentFileName, dump_info->WFileName, 0,
WflagChars);
else
MakeFilename(dump_info->CurrentFileName, dump_info->WFileName, 0, 0);
#ifdef HAVE_LIBCAP_NG
capng_update(CAPNG_ADD, CAPNG_EFFECTIVE, CAP_DAC_OVERRIDE);
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
#ifdef HAVE_CAPSICUM
fd = openat(dump_info->dirfd,
dump_info->CurrentFileName,
O_CREAT | O_WRONLY | O_TRUNC, 0644);
if (fd < 0) {
error("unable to open file %s",
dump_info->CurrentFileName);
}
fp = fdopen(fd, "w");
if (fp == NULL) {
error("unable to fdopen file %s",
dump_info->CurrentFileName);
}
dump_info->p = pcap_dump_fopen(dump_info->pd, fp);
#else /* !HAVE_CAPSICUM */
dump_info->p = pcap_dump_open(dump_info->pd, dump_info->CurrentFileName);
#endif
#ifdef HAVE_LIBCAP_NG
capng_update(CAPNG_DROP, CAPNG_EFFECTIVE, CAP_DAC_OVERRIDE);
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
if (dump_info->p == NULL)
error("%s", pcap_geterr(pd));
#ifdef HAVE_CAPSICUM
set_dumper_capsicum_rights(dump_info->p);
#endif
}
}
/*
* XXX - this won't prevent capture files from getting
* larger than Cflag - the last packet written to the
* file could put it over Cflag.
*/
if (Cflag != 0) {
long size = pcap_dump_ftell(dump_info->p);
if (size == -1)
error("ftell fails on output file");
if (size > Cflag) {
#ifdef HAVE_CAPSICUM
FILE *fp;
int fd;
#endif
/*
* Close the current file and open a new one.
*/
pcap_dump_close(dump_info->p);
/*
* Compress the file we just closed, if the user
* asked for it.
*/
if (zflag != NULL)
compress_savefile(dump_info->CurrentFileName);
Cflag_count++;
if (Wflag > 0) {
if (Cflag_count >= Wflag)
Cflag_count = 0;
}
if (dump_info->CurrentFileName != NULL)
free(dump_info->CurrentFileName);
dump_info->CurrentFileName = (char *)malloc(PATH_MAX + 1);
if (dump_info->CurrentFileName == NULL)
error("dump_packet_and_trunc: malloc");
MakeFilename(dump_info->CurrentFileName, dump_info->WFileName, Cflag_count, WflagChars);
#ifdef HAVE_LIBCAP_NG
capng_update(CAPNG_ADD, CAPNG_EFFECTIVE, CAP_DAC_OVERRIDE);
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
#ifdef HAVE_CAPSICUM
fd = openat(dump_info->dirfd, dump_info->CurrentFileName,
O_CREAT | O_WRONLY | O_TRUNC, 0644);
if (fd < 0) {
error("unable to open file %s",
dump_info->CurrentFileName);
}
fp = fdopen(fd, "w");
if (fp == NULL) {
error("unable to fdopen file %s",
dump_info->CurrentFileName);
}
dump_info->p = pcap_dump_fopen(dump_info->pd, fp);
#else /* !HAVE_CAPSICUM */
dump_info->p = pcap_dump_open(dump_info->pd, dump_info->CurrentFileName);
#endif
#ifdef HAVE_LIBCAP_NG
capng_update(CAPNG_DROP, CAPNG_EFFECTIVE, CAP_DAC_OVERRIDE);
capng_apply(CAPNG_SELECT_BOTH);
#endif /* HAVE_LIBCAP_NG */
if (dump_info->p == NULL)
error("%s", pcap_geterr(pd));
#ifdef HAVE_CAPSICUM
set_dumper_capsicum_rights(dump_info->p);
#endif
}
}
pcap_dump((u_char *)dump_info->p, h, sp);
#ifdef HAVE_PCAP_DUMP_FLUSH
if (Uflag)
pcap_dump_flush(dump_info->p);
#endif
--infodelay;
if (infoprint)
info(0);
}
static void
dump_packet(u_char *user, const struct pcap_pkthdr *h, const u_char *sp)
{
++packets_captured;
++infodelay;
pcap_dump(user, h, sp);
#ifdef HAVE_PCAP_DUMP_FLUSH
if (Uflag)
pcap_dump_flush((pcap_dumper_t *)user);
#endif
--infodelay;
if (infoprint)
info(0);
}
static void
print_packet(u_char *user, const struct pcap_pkthdr *h, const u_char *sp)
{
++packets_captured;
++infodelay;
pretty_print_packet((netdissect_options *)user, h, sp, packets_captured);
--infodelay;
if (infoprint)
info(0);
}
#ifdef _WIN32
/*
* XXX - there should really be libpcap calls to get the version
* number as a string (the string would be generated from #defines
* at run time, so that it's not generated from string constants
* in the library, as, on many UNIX systems, those constants would
* be statically linked into the application executable image, and
* would thus reflect the version of libpcap on the system on
* which the application was *linked*, not the system on which it's
* *running*.
*
* That routine should be documented, unlike the "version[]"
* string, so that UNIX vendors providing their own libpcaps
* don't omit it (as a couple of vendors have...).
*
* Packet.dll should perhaps also export a routine to return the
* version number of the Packet.dll code, to supply the
* "Wpcap_version" information on Windows.
*/
char WDversion[]="current-git.tcpdump.org";
#if !defined(HAVE_GENERATED_VERSION)
char version[]="current-git.tcpdump.org";
#endif
char pcap_version[]="current-git.tcpdump.org";
char Wpcap_version[]="3.1";
#endif
#ifdef SIGNAL_REQ_INFO
RETSIGTYPE requestinfo(int signo _U_)
{
if (infodelay)
++infoprint;
else
info(0);
}
#endif
/*
* Called once each second in verbose mode while dumping to file
*/
#ifdef USE_WIN32_MM_TIMER
void CALLBACK verbose_stats_dump (UINT timer_id _U_, UINT msg _U_, DWORD_PTR arg _U_,
DWORD_PTR dw1 _U_, DWORD_PTR dw2 _U_)
{
if (infodelay == 0)
fprintf(stderr, "Got %u\r", packets_captured);
}
#elif defined(HAVE_ALARM)
static void verbose_stats_dump(int sig _U_)
{
if (infodelay == 0)
fprintf(stderr, "Got %u\r", packets_captured);
alarm(1);
}
#endif
USES_APPLE_DEPRECATED_API
static void
print_version(void)
{
extern char version[];
#ifndef HAVE_PCAP_LIB_VERSION
#if defined(_WIN32) || defined(HAVE_PCAP_VERSION)
extern char pcap_version[];
#else /* defined(_WIN32) || defined(HAVE_PCAP_VERSION) */
static char pcap_version[] = "unknown";
#endif /* defined(_WIN32) || defined(HAVE_PCAP_VERSION) */
#endif /* HAVE_PCAP_LIB_VERSION */
const char *smi_version_string;
#ifdef HAVE_PCAP_LIB_VERSION
#ifdef _WIN32
(void)fprintf(stderr, "%s version %s, based on tcpdump version %s\n", program_name, WDversion, version);
#else /* _WIN32 */
(void)fprintf(stderr, "%s version %s\n", program_name, version);
#endif /* _WIN32 */
(void)fprintf(stderr, "%s\n",pcap_lib_version());
#else /* HAVE_PCAP_LIB_VERSION */
#ifdef _WIN32
(void)fprintf(stderr, "%s version %s, based on tcpdump version %s\n", program_name, WDversion, version);
(void)fprintf(stderr, "WinPcap version %s, based on libpcap version %s\n",Wpcap_version, pcap_version);
#else /* _WIN32 */
(void)fprintf(stderr, "%s version %s\n", program_name, version);
(void)fprintf(stderr, "libpcap version %s\n", pcap_version);
#endif /* _WIN32 */
#endif /* HAVE_PCAP_LIB_VERSION */
#if defined(HAVE_LIBCRYPTO) && defined(SSLEAY_VERSION)
(void)fprintf (stderr, "%s\n", SSLeay_version(SSLEAY_VERSION));
#endif
smi_version_string = nd_smi_version_string();
if (smi_version_string != NULL)
(void)fprintf (stderr, "SMI-library: %s\n", smi_version_string);
#if defined(__SANITIZE_ADDRESS__)
(void)fprintf (stderr, "Compiled with AddressSanitizer/GCC.\n");
#elif defined(__has_feature)
# if __has_feature(address_sanitizer)
(void)fprintf (stderr, "Compiled with AddressSanitizer/CLang.\n");
# endif
#endif /* __SANITIZE_ADDRESS__ or __has_feature */
}
USES_APPLE_RST
static void
print_usage(void)
{
print_version();
(void)fprintf(stderr,
"Usage: %s [-aAbd" D_FLAG "efhH" I_FLAG J_FLAG "KlLnNOpqStu" U_FLAG "vxX#]" B_FLAG_USAGE " [ -c count ]\n", program_name);
(void)fprintf(stderr,
"\t\t[ -C file_size ] [ -E algo:secret ] [ -F file ] [ -G seconds ]\n");
(void)fprintf(stderr,
"\t\t[ -i interface ]" j_FLAG_USAGE " [ -M secret ] [ --number ]\n");
#ifdef HAVE_PCAP_SETDIRECTION
(void)fprintf(stderr,
"\t\t[ -Q in|out|inout ]\n");
#endif
(void)fprintf(stderr,
"\t\t[ -r file ] [ -s snaplen ] ");
#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
(void)fprintf(stderr, "[ --time-stamp-precision precision ]\n");
(void)fprintf(stderr,
"\t\t");
#endif
#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
(void)fprintf(stderr, "[ --immediate-mode ] ");
#endif
(void)fprintf(stderr, "[ -T type ] [ --version ] [ -V file ]\n");
(void)fprintf(stderr,
"\t\t[ -w file ] [ -W filecount ] [ -y datalinktype ] [ -z postrotate-command ]\n");
(void)fprintf(stderr,
"\t\t[ -Z user ] [ expression ]\n");
}
/*
* Local Variables:
* c-style: whitesmith
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_287_0 |
crossvul-cpp_data_bad_3888_0 | /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* memcached - memory caching daemon
*
* https://www.memcached.org/
*
* Copyright 2003 Danga Interactive, Inc. All rights reserved.
*
* Use and distribution licensed under the BSD license. See
* the LICENSE file for full text.
*
* Authors:
* Anatoly Vorobey <mellon@pobox.com>
* Brad Fitzpatrick <brad@danga.com>
*/
#include "memcached.h"
#ifdef EXTSTORE
#include "storage.h"
#endif
#include "authfile.h"
#include "restart.h"
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/uio.h>
#include <ctype.h>
#include <stdarg.h>
/* some POSIX systems need the following definition
* to get mlockall flags out of sys/mman.h. */
#ifndef _P1003_1B_VISIBLE
#define _P1003_1B_VISIBLE
#endif
#include <pwd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <sysexits.h>
#include <stddef.h>
#ifdef HAVE_GETOPT_LONG
#include <getopt.h>
#endif
#ifdef TLS
#include "tls.h"
#endif
#if defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
/*
* forward declarations
*/
static void drive_machine(conn *c);
static int new_socket(struct addrinfo *ai);
static ssize_t tcp_read(conn *arg, void *buf, size_t count);
static ssize_t tcp_sendmsg(conn *arg, struct msghdr *msg, int flags);
static ssize_t tcp_write(conn *arg, void *buf, size_t count);
enum try_read_result {
READ_DATA_RECEIVED,
READ_NO_DATA_RECEIVED,
READ_ERROR, /** an error occurred (on the socket) (or client closed connection) */
READ_MEMORY_ERROR /** failed to allocate more memory */
};
static int try_read_command_negotiate(conn *c);
static int try_read_command_udp(conn *c);
static int try_read_command_binary(conn *c);
static int try_read_command_ascii(conn *c);
static int try_read_command_asciiauth(conn *c);
static enum try_read_result try_read_network(conn *c);
static enum try_read_result try_read_udp(conn *c);
static void conn_set_state(conn *c, enum conn_states state);
static int start_conn_timeout_thread();
static mc_resp* resp_finish(conn *c, mc_resp *resp);
/* stats */
static void stats_init(void);
static void server_stats(ADD_STAT add_stats, conn *c);
static void process_stat_settings(ADD_STAT add_stats, void *c);
static void conn_to_str(const conn *c, char *addr, char *svr_addr);
/** Return a datum for stats in binary protocol */
static bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c);
/* defaults */
static void settings_init(void);
/* event handling, network IO */
static void event_handler(const int fd, const short which, void *arg);
static void conn_close(conn *c);
static void conn_init(void);
static bool update_event(conn *c, const int new_flags);
static void complete_nread(conn *c);
static void process_command(conn *c, char *command);
static void write_and_free(conn *c, char *buf, int bytes);
static void write_bin_error(conn *c, protocol_binary_response_status err,
const char *errstr, int swallow);
static void write_bin_miss_response(conn *c, char *key, size_t nkey);
#ifdef EXTSTORE
static void _get_extstore_cb(void *e, obj_io *io, int ret);
static inline int _get_extstore(conn *c, item *it, mc_resp *resp);
#endif
static void conn_free(conn *c);
/** binprot handlers **/
static void process_bin_flush(conn *c, char *extbuf);
static void process_bin_append_prepend(conn *c);
static void process_bin_update(conn *c, char *extbuf);
static void process_bin_get_or_touch(conn *c, char *extbuf);
static void process_bin_delete(conn *c);
static void complete_incr_bin(conn *c, char *extbuf);
static void process_bin_stat(conn *c);
static void process_bin_sasl_auth(conn *c);
/** exported globals **/
struct stats stats;
struct stats_state stats_state;
struct settings settings;
time_t process_started; /* when the process was started */
conn **conns;
struct slab_rebalance slab_rebal;
volatile int slab_rebalance_signal;
#ifdef EXTSTORE
/* hoping this is temporary; I'd prefer to cut globals, but will complete this
* battle another day.
*/
void *ext_storage = NULL;
#endif
/** file scope variables **/
static conn *listen_conn = NULL;
static int max_fds;
static struct event_base *main_base;
enum transmit_result {
TRANSMIT_COMPLETE, /** All done writing. */
TRANSMIT_INCOMPLETE, /** More data remaining to write. */
TRANSMIT_SOFT_ERROR, /** Can't write any more right now. */
TRANSMIT_HARD_ERROR /** Can't write (c->state is set to conn_closing) */
};
/* Default methods to read from/ write to a socket */
ssize_t tcp_read(conn *c, void *buf, size_t count) {
assert (c != NULL);
return read(c->sfd, buf, count);
}
ssize_t tcp_sendmsg(conn *c, struct msghdr *msg, int flags) {
assert (c != NULL);
return sendmsg(c->sfd, msg, flags);
}
ssize_t tcp_write(conn *c, void *buf, size_t count) {
assert (c != NULL);
return write(c->sfd, buf, count);
}
static enum transmit_result transmit(conn *c);
/* This reduces the latency without adding lots of extra wiring to be able to
* notify the listener thread of when to listen again.
* Also, the clock timer could be broken out into its own thread and we
* can block the listener via a condition.
*/
static volatile bool allow_new_conns = true;
static bool stop_main_loop = false;
static struct event maxconnsevent;
static void maxconns_handler(const int fd, const short which, void *arg) {
struct timeval t = {.tv_sec = 0, .tv_usec = 10000};
if (fd == -42 || allow_new_conns == false) {
/* reschedule in 10ms if we need to keep polling */
evtimer_set(&maxconnsevent, maxconns_handler, 0);
event_base_set(main_base, &maxconnsevent);
evtimer_add(&maxconnsevent, &t);
} else {
evtimer_del(&maxconnsevent);
accept_new_conns(true);
}
}
#define REALTIME_MAXDELTA 60*60*24*30
/* Negative exptimes can underflow and end up immortal. realtime() will
immediately expire values that are greater than REALTIME_MAXDELTA, but less
than process_started, so lets aim for that. */
#define EXPTIME_TO_POSITIVE_TIME(exptime) (exptime < 0) ? \
REALTIME_MAXDELTA + 1 : exptime
/*
* given time value that's either unix time or delta from current unix time, return
* unix time. Use the fact that delta can't exceed one month (and real time value can't
* be that low).
*/
static rel_time_t realtime(const time_t exptime) {
/* no. of seconds in 30 days - largest possible delta exptime */
if (exptime == 0) return 0; /* 0 means never expire */
if (exptime > REALTIME_MAXDELTA) {
/* if item expiration is at/before the server started, give it an
expiration time of 1 second after the server started.
(because 0 means don't expire). without this, we'd
underflow and wrap around to some large value way in the
future, effectively making items expiring in the past
really expiring never */
if (exptime <= process_started)
return (rel_time_t)1;
return (rel_time_t)(exptime - process_started);
} else {
return (rel_time_t)(exptime + current_time);
}
}
static void stats_init(void) {
memset(&stats, 0, sizeof(struct stats));
memset(&stats_state, 0, sizeof(struct stats_state));
stats_state.accepting_conns = true; /* assuming we start in this state. */
/* make the time we started always be 2 seconds before we really
did, so time(0) - time.started is never zero. if so, things
like 'settings.oldest_live' which act as booleans as well as
values are now false in boolean context... */
process_started = time(0) - ITEM_UPDATE_INTERVAL - 2;
stats_prefix_init(settings.prefix_delimiter);
}
static void stats_reset(void) {
STATS_LOCK();
memset(&stats, 0, sizeof(struct stats));
stats_prefix_clear();
STATS_UNLOCK();
threadlocal_stats_reset();
item_stats_reset();
}
static void settings_init(void) {
settings.use_cas = true;
settings.access = 0700;
settings.port = 11211;
settings.udpport = 0;
#ifdef TLS
settings.ssl_enabled = false;
settings.ssl_ctx = NULL;
settings.ssl_chain_cert = NULL;
settings.ssl_key = NULL;
settings.ssl_verify_mode = SSL_VERIFY_NONE;
settings.ssl_keyformat = SSL_FILETYPE_PEM;
settings.ssl_ciphers = NULL;
settings.ssl_ca_cert = NULL;
settings.ssl_last_cert_refresh_time = current_time;
settings.ssl_wbuf_size = 16 * 1024; // default is 16KB (SSL max frame size is 17KB)
#endif
/* By default this string should be NULL for getaddrinfo() */
settings.inter = NULL;
settings.maxbytes = 64 * 1024 * 1024; /* default is 64MB */
settings.maxconns = 1024; /* to limit connections-related memory to about 5MB */
settings.verbose = 0;
settings.oldest_live = 0;
settings.oldest_cas = 0; /* supplements accuracy of oldest_live */
settings.evict_to_free = 1; /* push old items out of cache when memory runs out */
settings.socketpath = NULL; /* by default, not using a unix socket */
settings.auth_file = NULL; /* by default, not using ASCII authentication tokens */
settings.factor = 1.25;
settings.chunk_size = 48; /* space for a modest key and value */
settings.num_threads = 4; /* N workers */
settings.num_threads_per_udp = 0;
settings.prefix_delimiter = ':';
settings.detail_enabled = 0;
settings.reqs_per_event = 20;
settings.backlog = 1024;
settings.binding_protocol = negotiating_prot;
settings.item_size_max = 1024 * 1024; /* The famous 1MB upper limit. */
settings.slab_page_size = 1024 * 1024; /* chunks are split from 1MB pages. */
settings.slab_chunk_size_max = settings.slab_page_size / 2;
settings.sasl = false;
settings.maxconns_fast = true;
settings.lru_crawler = false;
settings.lru_crawler_sleep = 100;
settings.lru_crawler_tocrawl = 0;
settings.lru_maintainer_thread = false;
settings.lru_segmented = true;
settings.hot_lru_pct = 20;
settings.warm_lru_pct = 40;
settings.hot_max_factor = 0.2;
settings.warm_max_factor = 2.0;
settings.temp_lru = false;
settings.temporary_ttl = 61;
settings.idle_timeout = 0; /* disabled */
settings.hashpower_init = 0;
settings.slab_reassign = true;
settings.slab_automove = 1;
settings.slab_automove_ratio = 0.8;
settings.slab_automove_window = 30;
settings.shutdown_command = false;
settings.tail_repair_time = TAIL_REPAIR_TIME_DEFAULT;
settings.flush_enabled = true;
settings.dump_enabled = true;
settings.crawls_persleep = 1000;
settings.logger_watcher_buf_size = LOGGER_WATCHER_BUF_SIZE;
settings.logger_buf_size = LOGGER_BUF_SIZE;
settings.drop_privileges = false;
settings.watch_enabled = true;
settings.resp_obj_mem_limit = 0;
settings.read_buf_mem_limit = 0;
#ifdef MEMCACHED_DEBUG
settings.relaxed_privileges = false;
#endif
}
extern pthread_mutex_t conn_lock;
/* Connection timeout thread bits */
static pthread_t conn_timeout_tid;
static int do_run_conn_timeout_thread;
#define CONNS_PER_SLICE 100
#define TIMEOUT_MSG_SIZE (1 + sizeof(int))
static void *conn_timeout_thread(void *arg) {
int i;
conn *c;
char buf[TIMEOUT_MSG_SIZE];
rel_time_t oldest_last_cmd;
int sleep_time;
useconds_t timeslice = 1000000 / (max_fds / CONNS_PER_SLICE);
while(do_run_conn_timeout_thread) {
if (settings.verbose > 2)
fprintf(stderr, "idle timeout thread at top of connection list\n");
oldest_last_cmd = current_time;
for (i = 0; i < max_fds; i++) {
if ((i % CONNS_PER_SLICE) == 0) {
if (settings.verbose > 2)
fprintf(stderr, "idle timeout thread sleeping for %ulus\n",
(unsigned int)timeslice);
usleep(timeslice);
}
if (!conns[i])
continue;
c = conns[i];
if (!IS_TCP(c->transport))
continue;
if (c->state != conn_new_cmd && c->state != conn_read)
continue;
if ((current_time - c->last_cmd_time) > settings.idle_timeout) {
buf[0] = 't';
memcpy(&buf[1], &i, sizeof(int));
if (write(c->thread->notify_send_fd, buf, TIMEOUT_MSG_SIZE)
!= TIMEOUT_MSG_SIZE)
perror("Failed to write timeout to notify pipe");
} else {
if (c->last_cmd_time < oldest_last_cmd)
oldest_last_cmd = c->last_cmd_time;
}
}
/* This is the soonest we could have another connection time out */
sleep_time = settings.idle_timeout - (current_time - oldest_last_cmd) + 1;
if (sleep_time <= 0)
sleep_time = 1;
if (settings.verbose > 2)
fprintf(stderr,
"idle timeout thread finished pass, sleeping for %ds\n",
sleep_time);
usleep((useconds_t) sleep_time * 1000000);
}
return NULL;
}
static int start_conn_timeout_thread() {
int ret;
if (settings.idle_timeout == 0)
return -1;
do_run_conn_timeout_thread = 1;
if ((ret = pthread_create(&conn_timeout_tid, NULL,
conn_timeout_thread, NULL)) != 0) {
fprintf(stderr, "Can't create idle connection timeout thread: %s\n",
strerror(ret));
return -1;
}
return 0;
}
int stop_conn_timeout_thread(void) {
if (!do_run_conn_timeout_thread)
return -1;
do_run_conn_timeout_thread = 0;
pthread_join(conn_timeout_tid, NULL);
return 0;
}
/*
* read buffer cache helper functions
*/
static void rbuf_release(conn *c) {
if (c->rbuf != NULL && c->rbytes == 0 && !IS_UDP(c->transport)) {
if (c->rbuf_malloced) {
free(c->rbuf);
c->rbuf_malloced = false;
} else {
do_cache_free(c->thread->rbuf_cache, c->rbuf);
}
c->rsize = 0;
c->rbuf = NULL;
c->rcurr = NULL;
}
}
static bool rbuf_alloc(conn *c) {
if (c->rbuf == NULL) {
c->rbuf = do_cache_alloc(c->thread->rbuf_cache);
if (!c->rbuf) {
THR_STATS_LOCK(c);
c->thread->stats.read_buf_oom++;
THR_STATS_UNLOCK(c);
return false;
}
c->rsize = READ_BUFFER_SIZE;
c->rcurr = c->rbuf;
}
return true;
}
// Just for handling huge ASCII multigets.
// The previous system was essentially the same; realloc'ing until big enough,
// then realloc'ing back down after the request finished.
static bool rbuf_switch_to_malloc(conn *c) {
// Might as well start with x2 and work from there.
size_t size = c->rsize * 2;
char *tmp = malloc(size);
if (!tmp)
return false;
do_cache_free(c->thread->rbuf_cache, c->rbuf);
memcpy(tmp, c->rcurr, c->rbytes);
c->rcurr = c->rbuf = tmp;
c->rsize = size;
c->rbuf_malloced = true;
return true;
}
/*
* Initializes the connections array. We don't actually allocate connection
* structures until they're needed, so as to avoid wasting memory when the
* maximum connection count is much higher than the actual number of
* connections.
*
* This does end up wasting a few pointers' worth of memory for FDs that are
* used for things other than connections, but that's worth it in exchange for
* being able to directly index the conns array by FD.
*/
static void conn_init(void) {
/* We're unlikely to see an FD much higher than maxconns. */
int next_fd = dup(1);
if (next_fd < 0) {
perror("Failed to duplicate file descriptor\n");
exit(1);
}
int headroom = 10; /* account for extra unexpected open FDs */
struct rlimit rl;
max_fds = settings.maxconns + headroom + next_fd;
/* But if possible, get the actual highest FD we can possibly ever see. */
if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
max_fds = rl.rlim_max;
} else {
fprintf(stderr, "Failed to query maximum file descriptor; "
"falling back to maxconns\n");
}
close(next_fd);
if ((conns = calloc(max_fds, sizeof(conn *))) == NULL) {
fprintf(stderr, "Failed to allocate connection structures\n");
/* This is unrecoverable so bail out early. */
exit(1);
}
}
static const char *prot_text(enum protocol prot) {
char *rv = "unknown";
switch(prot) {
case ascii_prot:
rv = "ascii";
break;
case binary_prot:
rv = "binary";
break;
case negotiating_prot:
rv = "auto-negotiate";
break;
}
return rv;
}
void conn_close_idle(conn *c) {
if (settings.idle_timeout > 0 &&
(current_time - c->last_cmd_time) > settings.idle_timeout) {
if (c->state != conn_new_cmd && c->state != conn_read) {
if (settings.verbose > 1)
fprintf(stderr,
"fd %d wants to timeout, but isn't in read state", c->sfd);
return;
}
if (settings.verbose > 1)
fprintf(stderr, "Closing idle fd %d\n", c->sfd);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.idle_kicks++;
pthread_mutex_unlock(&c->thread->stats.mutex);
conn_set_state(c, conn_closing);
drive_machine(c);
}
}
/* bring conn back from a sidethread. could have had its event base moved. */
void conn_worker_readd(conn *c) {
c->ev_flags = EV_READ | EV_PERSIST;
event_set(&c->event, c->sfd, c->ev_flags, event_handler, (void *)c);
event_base_set(c->thread->base, &c->event);
// TODO: call conn_cleanup/fail/etc
if (event_add(&c->event, 0) == -1) {
perror("event_add");
}
// side thread wanted us to close immediately.
if (c->state == conn_closing) {
drive_machine(c);
return;
}
c->state = conn_new_cmd;
#ifdef EXTSTORE
// If we had IO objects, process
if (c->io_wraplist) {
//assert(c->io_wrapleft == 0); // assert no more to process
conn_set_state(c, conn_mwrite);
drive_machine(c);
}
#endif
}
conn *conn_new(const int sfd, enum conn_states init_state,
const int event_flags,
const int read_buffer_size, enum network_transport transport,
struct event_base *base, void *ssl) {
conn *c;
assert(sfd >= 0 && sfd < max_fds);
c = conns[sfd];
if (NULL == c) {
if (!(c = (conn *)calloc(1, sizeof(conn)))) {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
fprintf(stderr, "Failed to allocate connection object\n");
return NULL;
}
MEMCACHED_CONN_CREATE(c);
c->read = NULL;
c->sendmsg = NULL;
c->write = NULL;
c->rbuf = NULL;
c->rsize = read_buffer_size;
// UDP connections use a persistent static buffer.
if (c->rsize) {
c->rbuf = (char *)malloc((size_t)c->rsize);
}
if (c->rsize && c->rbuf == NULL) {
conn_free(c);
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
fprintf(stderr, "Failed to allocate buffers for connection\n");
return NULL;
}
STATS_LOCK();
stats_state.conn_structs++;
STATS_UNLOCK();
c->sfd = sfd;
conns[sfd] = c;
}
c->transport = transport;
c->protocol = settings.binding_protocol;
/* unix socket mode doesn't need this, so zeroed out. but why
* is this done for every command? presumably for UDP
* mode. */
if (!settings.socketpath) {
c->request_addr_size = sizeof(c->request_addr);
} else {
c->request_addr_size = 0;
}
if (transport == tcp_transport && init_state == conn_new_cmd) {
if (getpeername(sfd, (struct sockaddr *) &c->request_addr,
&c->request_addr_size)) {
perror("getpeername");
memset(&c->request_addr, 0, sizeof(c->request_addr));
}
}
if (settings.verbose > 1) {
if (init_state == conn_listening) {
fprintf(stderr, "<%d server listening (%s)\n", sfd,
prot_text(c->protocol));
} else if (IS_UDP(transport)) {
fprintf(stderr, "<%d server listening (udp)\n", sfd);
} else if (c->protocol == negotiating_prot) {
fprintf(stderr, "<%d new auto-negotiating client connection\n",
sfd);
} else if (c->protocol == ascii_prot) {
fprintf(stderr, "<%d new ascii client connection.\n", sfd);
} else if (c->protocol == binary_prot) {
fprintf(stderr, "<%d new binary client connection.\n", sfd);
} else {
fprintf(stderr, "<%d new unknown (%d) client connection\n",
sfd, c->protocol);
assert(false);
}
}
#ifdef TLS
c->ssl = NULL;
c->ssl_wbuf = NULL;
c->ssl_enabled = false;
#endif
c->state = init_state;
c->rlbytes = 0;
c->cmd = -1;
c->rbytes = 0;
c->rcurr = c->rbuf;
c->ritem = 0;
c->rbuf_malloced = false;
c->sasl_started = false;
c->set_stale = false;
c->mset_res = false;
c->close_after_write = false;
c->last_cmd_time = current_time; /* initialize for idle kicker */
#ifdef EXTSTORE
c->io_wraplist = NULL;
c->io_wrapleft = 0;
#endif
c->item = 0;
c->noreply = false;
#ifdef TLS
if (ssl) {
c->ssl = (SSL*)ssl;
c->read = ssl_read;
c->sendmsg = ssl_sendmsg;
c->write = ssl_write;
c->ssl_enabled = true;
SSL_set_info_callback(c->ssl, ssl_callback);
} else
#else
// This must be NULL if TLS is not enabled.
assert(ssl == NULL);
#endif
{
c->read = tcp_read;
c->sendmsg = tcp_sendmsg;
c->write = tcp_write;
}
if (IS_UDP(transport)) {
c->try_read_command = try_read_command_udp;
} else {
switch (c->protocol) {
case ascii_prot:
if (settings.auth_file == NULL) {
c->authenticated = true;
c->try_read_command = try_read_command_ascii;
} else {
c->authenticated = false;
c->try_read_command = try_read_command_asciiauth;
}
break;
case binary_prot:
// binprot handles its own authentication via SASL parsing.
c->authenticated = false;
c->try_read_command = try_read_command_binary;
break;
case negotiating_prot:
c->try_read_command = try_read_command_negotiate;
break;
}
}
event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
event_base_set(base, &c->event);
c->ev_flags = event_flags;
if (event_add(&c->event, 0) == -1) {
perror("event_add");
return NULL;
}
STATS_LOCK();
stats_state.curr_conns++;
stats.total_conns++;
STATS_UNLOCK();
MEMCACHED_CONN_ALLOCATE(c->sfd);
return c;
}
#ifdef EXTSTORE
static void recache_or_free(conn *c, io_wrap *wrap) {
item *it;
it = (item *)wrap->io.buf;
bool do_free = true;
if (wrap->active) {
// If request never dispatched, free the read buffer but leave the
// item header alone.
do_free = false;
size_t ntotal = ITEM_ntotal(wrap->hdr_it);
slabs_free(it, ntotal, slabs_clsid(ntotal));
c->io_wrapleft--;
assert(c->io_wrapleft >= 0);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_aborted_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
} else if (wrap->miss) {
// If request was ultimately a miss, unlink the header.
do_free = false;
size_t ntotal = ITEM_ntotal(wrap->hdr_it);
item_unlink(wrap->hdr_it);
slabs_free(it, ntotal, slabs_clsid(ntotal));
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.miss_from_extstore++;
if (wrap->badcrc)
c->thread->stats.badcrc_from_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
} else if (settings.ext_recache_rate) {
// hashvalue is cuddled during store
uint32_t hv = (uint32_t)it->time;
// opt to throw away rather than wait on a lock.
void *hold_lock = item_trylock(hv);
if (hold_lock != NULL) {
item *h_it = wrap->hdr_it;
uint8_t flags = ITEM_LINKED|ITEM_FETCHED|ITEM_ACTIVE;
// Item must be recently hit at least twice to recache.
if (((h_it->it_flags & flags) == flags) &&
h_it->time > current_time - ITEM_UPDATE_INTERVAL &&
c->recache_counter++ % settings.ext_recache_rate == 0) {
do_free = false;
// In case it's been updated.
it->exptime = h_it->exptime;
it->it_flags &= ~ITEM_LINKED;
it->refcount = 0;
it->h_next = NULL; // might not be necessary.
STORAGE_delete(c->thread->storage, h_it);
item_replace(h_it, it, hv);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.recache_from_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
}
if (hold_lock)
item_trylock_unlock(hold_lock);
}
if (do_free)
slabs_free(it, ITEM_ntotal(it), ITEM_clsid(it));
wrap->io.buf = NULL; // sanity.
wrap->io.next = NULL;
wrap->next = NULL;
wrap->active = false;
// TODO: reuse lock and/or hv.
item_remove(wrap->hdr_it);
}
#endif
static void conn_release_items(conn *c) {
assert(c != NULL);
if (c->item) {
item_remove(c->item);
c->item = 0;
}
#ifdef EXTSTORE
if (c->io_wraplist) {
io_wrap *tmp = c->io_wraplist;
while (tmp) {
io_wrap *next = tmp->next;
recache_or_free(c, tmp);
// malloc'ed iovec list used for chunked extstore fetches.
if (tmp->io.iov) {
free(tmp->io.iov);
tmp->io.iov = NULL;
}
do_cache_free(c->thread->io_cache, tmp); // lockless
tmp = next;
}
c->io_wraplist = NULL;
}
#endif
// Cull any unsent responses.
if (c->resp_head) {
mc_resp *resp = c->resp_head;
// r_f() handles the chain maintenance.
while (resp) {
// temporary by default. hide behind a debug flag in the future:
// double free detection. Transmit loops can drop out early, but
// here we could infinite loop.
if (resp->free) {
fprintf(stderr, "ERROR: double free detected during conn_release_items(): [%d] [%s]\n",
c->sfd, c->protocol == binary_prot ? "binary" : "ascii");
// Since this is a critical failure, just leak the memory.
// If these errors are seen, an abort() can be used instead.
c->resp_head = NULL;
c->resp = NULL;
break;
}
resp = resp_finish(c, resp);
}
}
}
static void conn_cleanup(conn *c) {
assert(c != NULL);
conn_release_items(c);
if (c->sasl_conn) {
assert(settings.sasl);
sasl_dispose(&c->sasl_conn);
c->sasl_conn = NULL;
}
if (IS_UDP(c->transport)) {
conn_set_state(c, conn_read);
}
}
/*
* Frees a connection.
*/
void conn_free(conn *c) {
if (c) {
assert(c != NULL);
assert(c->sfd >= 0 && c->sfd < max_fds);
MEMCACHED_CONN_DESTROY(c);
conns[c->sfd] = NULL;
if (c->rbuf)
free(c->rbuf);
#ifdef TLS
if (c->ssl_wbuf)
c->ssl_wbuf = NULL;
#endif
free(c);
}
}
static void conn_close(conn *c) {
assert(c != NULL);
/* delete the event, the socket and the conn */
event_del(&c->event);
if (settings.verbose > 1)
fprintf(stderr, "<%d connection closed.\n", c->sfd);
conn_cleanup(c);
// force release of read buffer.
if (c->thread) {
c->rbytes = 0;
rbuf_release(c);
}
MEMCACHED_CONN_RELEASE(c->sfd);
conn_set_state(c, conn_closed);
#ifdef TLS
if (c->ssl) {
SSL_shutdown(c->ssl);
SSL_free(c->ssl);
}
#endif
close(c->sfd);
pthread_mutex_lock(&conn_lock);
allow_new_conns = true;
pthread_mutex_unlock(&conn_lock);
STATS_LOCK();
stats_state.curr_conns--;
STATS_UNLOCK();
return;
}
/**
* Convert a state name to a human readable form.
*/
static const char *state_text(enum conn_states state) {
const char* const statenames[] = { "conn_listening",
"conn_new_cmd",
"conn_waiting",
"conn_read",
"conn_parse_cmd",
"conn_write",
"conn_nread",
"conn_swallow",
"conn_closing",
"conn_mwrite",
"conn_closed",
"conn_watch" };
return statenames[state];
}
/*
* Sets a connection's current state in the state machine. Any special
* processing that needs to happen on certain state transitions can
* happen here.
*/
static void conn_set_state(conn *c, enum conn_states state) {
assert(c != NULL);
assert(state >= conn_listening && state < conn_max_state);
if (state != c->state) {
if (settings.verbose > 2) {
fprintf(stderr, "%d: going from %s to %s\n",
c->sfd, state_text(c->state),
state_text(state));
}
if (state == conn_write || state == conn_mwrite) {
MEMCACHED_PROCESS_COMMAND_END(c->sfd, c->resp->wbuf, c->resp->wbytes);
}
c->state = state;
}
}
/*
* response object helper functions
*/
static void resp_reset(mc_resp *resp) {
if (resp->item) {
item_remove(resp->item);
resp->item = NULL;
}
if (resp->write_and_free) {
free(resp->write_and_free);
resp->write_and_free = NULL;
}
resp->wbytes = 0;
resp->tosend = 0;
resp->iovcnt = 0;
resp->chunked_data_iov = 0;
resp->chunked_total = 0;
resp->skip = false;
}
static void resp_add_iov(mc_resp *resp, const void *buf, int len) {
assert(resp->iovcnt < MC_RESP_IOVCOUNT);
int x = resp->iovcnt;
resp->iov[x].iov_base = (void *)buf;
resp->iov[x].iov_len = len;
resp->iovcnt++;
resp->tosend += len;
}
// Notes that an IOV should be handled as a chunked item header.
// TODO: I'm hoping this isn't a permanent abstraction while I learn what the
// API should be.
static void resp_add_chunked_iov(mc_resp *resp, const void *buf, int len) {
resp->chunked_data_iov = resp->iovcnt;
resp->chunked_total = len;
resp_add_iov(resp, buf, len);
}
static bool resp_start(conn *c) {
mc_resp *resp = do_cache_alloc(c->thread->resp_cache);
if (!resp) {
THR_STATS_LOCK(c);
c->thread->stats.response_obj_oom++;
THR_STATS_UNLOCK(c);
return false;
}
// FIXME: make wbuf indirect or use offsetof to zero up until wbuf
memset(resp, 0, sizeof(*resp));
if (!c->resp_head) {
c->resp_head = resp;
}
if (!c->resp) {
c->resp = resp;
} else {
c->resp->next = resp;
c->resp = resp;
}
if (IS_UDP(c->transport)) {
// need to hold on to some data for async responses.
c->resp->request_id = c->request_id;
c->resp->request_addr = c->request_addr;
c->resp->request_addr_size = c->request_addr_size;
}
return true;
}
// returns next response in chain.
static mc_resp* resp_finish(conn *c, mc_resp *resp) {
mc_resp *next = resp->next;
if (resp->item) {
// TODO: cache hash value in resp obj?
item_remove(resp->item);
resp->item = NULL;
}
if (resp->write_and_free) {
free(resp->write_and_free);
}
if (c->resp_head == resp) {
c->resp_head = next;
}
if (c->resp == resp) {
c->resp = NULL;
}
resp->free = true;
do_cache_free(c->thread->resp_cache, resp);
return next;
}
// tells if connection has a depth of response objects to process.
static bool resp_has_stack(conn *c) {
return c->resp_head->next != NULL ? true : false;
}
static void out_string(conn *c, const char *str) {
size_t len;
mc_resp *resp = c->resp;
assert(c != NULL);
// if response was original filled with something, but we're now writing
// out an error or similar, have to reset the object first.
// TODO: since this is often redundant with allocation, how many callers
// are actually requiring it be reset? Can we fast test by just looking at
// tosend and reset if nonzero?
resp_reset(resp);
if (c->noreply) {
// TODO: just invalidate the response since nothing's been attempted
// to send yet?
resp->skip = true;
if (settings.verbose > 1)
fprintf(stderr, ">%d NOREPLY %s\n", c->sfd, str);
conn_set_state(c, conn_new_cmd);
return;
}
if (settings.verbose > 1)
fprintf(stderr, ">%d %s\n", c->sfd, str);
// Fill response object with static string.
len = strlen(str);
if ((len + 2) > WRITE_BUFFER_SIZE) {
/* ought to be always enough. just fail for simplicity */
str = "SERVER_ERROR output line too long";
len = strlen(str);
}
memcpy(resp->wbuf, str, len);
memcpy(resp->wbuf + len, "\r\n", 2);
resp_add_iov(resp, resp->wbuf, len + 2);
conn_set_state(c, conn_new_cmd);
return;
}
// For metaget-style ASCII commands. Ignores noreply, ensuring clients see
// protocol level errors.
static void out_errstring(conn *c, const char *str) {
c->noreply = false;
out_string(c, str);
}
/*
* Outputs a protocol-specific "out of memory" error. For ASCII clients,
* this is equivalent to out_string().
*/
static void out_of_memory(conn *c, char *ascii_error) {
const static char error_prefix[] = "SERVER_ERROR ";
const static int error_prefix_len = sizeof(error_prefix) - 1;
if (c->protocol == binary_prot) {
/* Strip off the generic error prefix; it's irrelevant in binary */
if (!strncmp(ascii_error, error_prefix, error_prefix_len)) {
ascii_error += error_prefix_len;
}
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, ascii_error, 0);
} else {
out_string(c, ascii_error);
}
}
/*
* we get here after reading the value in set/add/replace commands. The command
* has been stored in c->cmd, and the item is ready in c->item.
*/
static void complete_nread_ascii(conn *c) {
assert(c != NULL);
item *it = c->item;
int comm = c->cmd;
enum store_item_type ret;
bool is_valid = false;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].set_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if ((it->it_flags & ITEM_CHUNKED) == 0) {
if (strncmp(ITEM_data(it) + it->nbytes - 2, "\r\n", 2) == 0) {
is_valid = true;
}
} else {
char buf[2];
/* should point to the final item chunk */
item_chunk *ch = (item_chunk *) c->ritem;
assert(ch->used != 0);
/* :( We need to look at the last two bytes. This could span two
* chunks.
*/
if (ch->used > 1) {
buf[0] = ch->data[ch->used - 2];
buf[1] = ch->data[ch->used - 1];
} else {
assert(ch->prev);
assert(ch->used == 1);
buf[0] = ch->prev->data[ch->prev->used - 1];
buf[1] = ch->data[ch->used - 1];
}
if (strncmp(buf, "\r\n", 2) == 0) {
is_valid = true;
} else {
assert(1 == 0);
}
}
if (!is_valid) {
// metaset mode always returns errors.
if (c->mset_res) {
c->noreply = false;
}
out_string(c, "CLIENT_ERROR bad data chunk");
} else {
ret = store_item(it, comm, c);
#ifdef ENABLE_DTRACE
uint64_t cas = ITEM_get_cas(it);
switch (c->cmd) {
case NREAD_ADD:
MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_REPLACE:
MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_APPEND:
MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_PREPEND:
MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_SET:
MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
(ret == 1) ? it->nbytes : -1, cas);
break;
case NREAD_CAS:
MEMCACHED_COMMAND_CAS(c->sfd, ITEM_key(it), it->nkey, it->nbytes,
cas);
break;
}
#endif
if (c->mset_res) {
// Replace the status code in the response.
// Rest was prepared during mset parsing.
mc_resp *resp = c->resp;
conn_set_state(c, conn_new_cmd);
switch (ret) {
case STORED:
memcpy(resp->wbuf, "OK ", 3);
// Only place noreply is used for meta cmds is a nominal response.
if (c->noreply) {
resp->skip = true;
}
break;
case EXISTS:
memcpy(resp->wbuf, "EX ", 3);
break;
case NOT_FOUND:
memcpy(resp->wbuf, "NF ", 3);
break;
case NOT_STORED:
memcpy(resp->wbuf, "NS ", 3);
break;
default:
c->noreply = false;
out_string(c, "SERVER_ERROR Unhandled storage type.");
}
} else {
switch (ret) {
case STORED:
out_string(c, "STORED");
break;
case EXISTS:
out_string(c, "EXISTS");
break;
case NOT_FOUND:
out_string(c, "NOT_FOUND");
break;
case NOT_STORED:
out_string(c, "NOT_STORED");
break;
default:
out_string(c, "SERVER_ERROR Unhandled storage type.");
}
}
}
c->set_stale = false; /* force flag to be off just in case */
c->mset_res = false;
item_remove(c->item); /* release the c->item reference */
c->item = 0;
}
/**
* get a pointer to the key in this request
*/
static char* binary_get_key(conn *c) {
return c->rcurr - (c->binary_header.request.keylen);
}
static void add_bin_header(conn *c, uint16_t err, uint8_t hdr_len, uint16_t key_len, uint32_t body_len) {
protocol_binary_response_header* header;
mc_resp *resp = c->resp;
assert(c);
resp_reset(resp);
header = (protocol_binary_response_header *)resp->wbuf;
header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
header->response.opcode = c->binary_header.request.opcode;
header->response.keylen = (uint16_t)htons(key_len);
header->response.extlen = (uint8_t)hdr_len;
header->response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
header->response.status = (uint16_t)htons(err);
header->response.bodylen = htonl(body_len);
header->response.opaque = c->opaque;
header->response.cas = htonll(c->cas);
if (settings.verbose > 1) {
int ii;
fprintf(stderr, ">%d Writing bin response:", c->sfd);
for (ii = 0; ii < sizeof(header->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n>%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", header->bytes[ii]);
}
fprintf(stderr, "\n");
}
resp->wbytes = sizeof(header->response);
resp_add_iov(resp, resp->wbuf, resp->wbytes);
}
/**
* Writes a binary error response. If errstr is supplied, it is used as the
* error text; otherwise a generic description of the error status code is
* included.
*/
static void write_bin_error(conn *c, protocol_binary_response_status err,
const char *errstr, int swallow) {
size_t len;
if (!errstr) {
switch (err) {
case PROTOCOL_BINARY_RESPONSE_ENOMEM:
errstr = "Out of memory";
break;
case PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND:
errstr = "Unknown command";
break;
case PROTOCOL_BINARY_RESPONSE_KEY_ENOENT:
errstr = "Not found";
break;
case PROTOCOL_BINARY_RESPONSE_EINVAL:
errstr = "Invalid arguments";
break;
case PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS:
errstr = "Data exists for key.";
break;
case PROTOCOL_BINARY_RESPONSE_E2BIG:
errstr = "Too large.";
break;
case PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL:
errstr = "Non-numeric server-side value for incr or decr";
break;
case PROTOCOL_BINARY_RESPONSE_NOT_STORED:
errstr = "Not stored.";
break;
case PROTOCOL_BINARY_RESPONSE_AUTH_ERROR:
errstr = "Auth failure.";
break;
default:
assert(false);
errstr = "UNHANDLED ERROR";
fprintf(stderr, ">%d UNHANDLED ERROR: %d\n", c->sfd, err);
}
}
if (settings.verbose > 1) {
fprintf(stderr, ">%d Writing an error: %s\n", c->sfd, errstr);
}
len = strlen(errstr);
add_bin_header(c, err, 0, 0, len);
if (len > 0) {
resp_add_iov(c->resp, errstr, len);
}
if (swallow > 0) {
c->sbytes = swallow;
conn_set_state(c, conn_swallow);
} else {
conn_set_state(c, conn_mwrite);
}
}
/* Form and send a response to a command over the binary protocol */
static void write_bin_response(conn *c, void *d, int hlen, int keylen, int dlen) {
if (!c->noreply || c->cmd == PROTOCOL_BINARY_CMD_GET ||
c->cmd == PROTOCOL_BINARY_CMD_GETK) {
add_bin_header(c, 0, hlen, keylen, dlen);
mc_resp *resp = c->resp;
if (dlen > 0) {
resp_add_iov(resp, d, dlen);
}
}
conn_set_state(c, conn_new_cmd);
}
static void complete_incr_bin(conn *c, char *extbuf) {
item *it;
char *key;
size_t nkey;
/* Weird magic in add_delta forces me to pad here */
char tmpbuf[INCR_MAX_STORAGE_LEN];
uint64_t cas = 0;
protocol_binary_response_incr* rsp = (protocol_binary_response_incr*)c->resp->wbuf;
protocol_binary_request_incr* req = (void *)extbuf;
assert(c != NULL);
//assert(c->wsize >= sizeof(*rsp));
/* fix byteorder in the request */
req->message.body.delta = ntohll(req->message.body.delta);
req->message.body.initial = ntohll(req->message.body.initial);
req->message.body.expiration = ntohl(req->message.body.expiration);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
if (settings.verbose > 1) {
int i;
fprintf(stderr, "incr ");
for (i = 0; i < nkey; i++) {
fprintf(stderr, "%c", key[i]);
}
fprintf(stderr, " %lld, %llu, %d\n",
(long long)req->message.body.delta,
(long long)req->message.body.initial,
req->message.body.expiration);
}
if (c->binary_header.request.cas != 0) {
cas = c->binary_header.request.cas;
}
switch(add_delta(c, key, nkey, c->cmd == PROTOCOL_BINARY_CMD_INCREMENT,
req->message.body.delta, tmpbuf,
&cas)) {
case OK:
rsp->message.body.value = htonll(strtoull(tmpbuf, NULL, 10));
if (cas) {
c->cas = cas;
}
write_bin_response(c, &rsp->message.body, 0, 0,
sizeof(rsp->message.body.value));
break;
case NON_NUMERIC:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL, NULL, 0);
break;
case EOM:
out_of_memory(c, "SERVER_ERROR Out of memory incrementing value");
break;
case DELTA_ITEM_NOT_FOUND:
if (req->message.body.expiration != 0xffffffff) {
/* Save some room for the response */
rsp->message.body.value = htonll(req->message.body.initial);
snprintf(tmpbuf, INCR_MAX_STORAGE_LEN, "%llu",
(unsigned long long)req->message.body.initial);
int res = strlen(tmpbuf);
it = item_alloc(key, nkey, 0, realtime(req->message.body.expiration),
res + 2);
if (it != NULL) {
memcpy(ITEM_data(it), tmpbuf, res);
memcpy(ITEM_data(it) + res, "\r\n", 2);
if (store_item(it, NREAD_ADD, c)) {
c->cas = ITEM_get_cas(it);
write_bin_response(c, &rsp->message.body, 0, 0, sizeof(rsp->message.body.value));
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_NOT_STORED,
NULL, 0);
}
item_remove(it); /* release our reference */
} else {
out_of_memory(c,
"SERVER_ERROR Out of memory allocating new item");
}
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
}
break;
case DELTA_ITEM_CAS_MISMATCH:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
break;
}
}
static void complete_update_bin(conn *c) {
protocol_binary_response_status eno = PROTOCOL_BINARY_RESPONSE_EINVAL;
enum store_item_type ret = NOT_STORED;
assert(c != NULL);
item *it = c->item;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].set_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
/* We don't actually receive the trailing two characters in the bin
* protocol, so we're going to just set them here */
if ((it->it_flags & ITEM_CHUNKED) == 0) {
*(ITEM_data(it) + it->nbytes - 2) = '\r';
*(ITEM_data(it) + it->nbytes - 1) = '\n';
} else {
assert(c->ritem);
item_chunk *ch = (item_chunk *) c->ritem;
if (ch->size == ch->used)
ch = ch->next;
assert(ch->size - ch->used >= 2);
ch->data[ch->used] = '\r';
ch->data[ch->used + 1] = '\n';
ch->used += 2;
}
ret = store_item(it, c->cmd, c);
#ifdef ENABLE_DTRACE
uint64_t cas = ITEM_get_cas(it);
switch (c->cmd) {
case NREAD_ADD:
MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_REPLACE:
MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_APPEND:
MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_PREPEND:
MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_SET:
MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
}
#endif
switch (ret) {
case STORED:
/* Stored */
write_bin_response(c, NULL, 0, 0, 0);
break;
case EXISTS:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
break;
case NOT_FOUND:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
break;
case NOT_STORED:
case TOO_LARGE:
case NO_MEMORY:
if (c->cmd == NREAD_ADD) {
eno = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
} else if(c->cmd == NREAD_REPLACE) {
eno = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
} else {
eno = PROTOCOL_BINARY_RESPONSE_NOT_STORED;
}
write_bin_error(c, eno, NULL, 0);
}
item_remove(c->item); /* release the c->item reference */
c->item = 0;
}
static void write_bin_miss_response(conn *c, char *key, size_t nkey) {
if (nkey) {
add_bin_header(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
0, nkey, nkey);
char *ofs = c->resp->wbuf + sizeof(protocol_binary_response_header);
memcpy(ofs, key, nkey);
resp_add_iov(c->resp, ofs, nkey);
conn_set_state(c, conn_new_cmd);
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
NULL, 0);
}
}
static void process_bin_get_or_touch(conn *c, char *extbuf) {
item *it;
protocol_binary_response_get* rsp = (protocol_binary_response_get*)c->resp->wbuf;
char* key = binary_get_key(c);
size_t nkey = c->binary_header.request.keylen;
int should_touch = (c->cmd == PROTOCOL_BINARY_CMD_TOUCH ||
c->cmd == PROTOCOL_BINARY_CMD_GAT ||
c->cmd == PROTOCOL_BINARY_CMD_GATK);
int should_return_key = (c->cmd == PROTOCOL_BINARY_CMD_GETK ||
c->cmd == PROTOCOL_BINARY_CMD_GATK);
int should_return_value = (c->cmd != PROTOCOL_BINARY_CMD_TOUCH);
bool failed = false;
if (settings.verbose > 1) {
fprintf(stderr, "<%d %s ", c->sfd, should_touch ? "TOUCH" : "GET");
if (fwrite(key, 1, nkey, stderr)) {}
fputc('\n', stderr);
}
if (should_touch) {
protocol_binary_request_touch *t = (void *)extbuf;
time_t exptime = ntohl(t->message.body.expiration);
it = item_touch(key, nkey, realtime(exptime), c);
} else {
it = item_get(key, nkey, c, DO_UPDATE);
}
if (it) {
/* the length has two unnecessary bytes ("\r\n") */
uint16_t keylen = 0;
uint32_t bodylen = sizeof(rsp->message.body) + (it->nbytes - 2);
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
} else {
c->thread->stats.get_cmds++;
c->thread->stats.lru_hits[it->slabs_clsid]++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
if (should_touch) {
MEMCACHED_COMMAND_TOUCH(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
} else {
MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
}
if (c->cmd == PROTOCOL_BINARY_CMD_TOUCH) {
bodylen -= it->nbytes - 2;
} else if (should_return_key) {
bodylen += nkey;
keylen = nkey;
}
add_bin_header(c, 0, sizeof(rsp->message.body), keylen, bodylen);
rsp->message.header.response.cas = htonll(ITEM_get_cas(it));
// add the flags
FLAGS_CONV(it, rsp->message.body.flags);
rsp->message.body.flags = htonl(rsp->message.body.flags);
resp_add_iov(c->resp, &rsp->message.body, sizeof(rsp->message.body));
if (should_return_key) {
resp_add_iov(c->resp, ITEM_key(it), nkey);
}
if (should_return_value) {
/* Add the data minus the CRLF */
#ifdef EXTSTORE
if (it->it_flags & ITEM_HDR) {
if (_get_extstore(c, it, c->resp) != 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_oom_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
failed = true;
}
} else if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(c->resp, ITEM_data(it), it->nbytes - 2);
} else {
// Allow transmit handler to find the item and expand iov's
resp_add_chunked_iov(c->resp, it, it->nbytes - 2);
}
#else
if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(c->resp, ITEM_data(it), it->nbytes - 2);
} else {
resp_add_chunked_iov(c->resp, it, it->nbytes - 2);
}
#endif
}
if (!failed) {
conn_set_state(c, conn_new_cmd);
/* Remember this command so we can garbage collect it later */
#ifdef EXTSTORE
if ((it->it_flags & ITEM_HDR) != 0 && should_return_value) {
// Only have extstore clean if header and returning value.
c->resp->item = NULL;
} else {
c->resp->item = it;
}
#else
c->resp->item = it;
#endif
} else {
item_remove(it);
}
} else {
failed = true;
}
if (failed) {
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
} else {
c->thread->stats.get_cmds++;
c->thread->stats.get_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
if (should_touch) {
MEMCACHED_COMMAND_TOUCH(c->sfd, key, nkey, -1, 0);
} else {
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
}
if (c->noreply) {
conn_set_state(c, conn_new_cmd);
} else {
if (should_return_key) {
write_bin_miss_response(c, key, nkey);
} else {
write_bin_miss_response(c, NULL, 0);
}
}
}
if (settings.detail_enabled) {
stats_prefix_record_get(key, nkey, NULL != it);
}
}
static void append_bin_stats(const char *key, const uint16_t klen,
const char *val, const uint32_t vlen,
conn *c) {
char *buf = c->stats.buffer + c->stats.offset;
uint32_t bodylen = klen + vlen;
protocol_binary_response_header header = {
.response.magic = (uint8_t)PROTOCOL_BINARY_RES,
.response.opcode = PROTOCOL_BINARY_CMD_STAT,
.response.keylen = (uint16_t)htons(klen),
.response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES,
.response.bodylen = htonl(bodylen),
.response.opaque = c->opaque
};
memcpy(buf, header.bytes, sizeof(header.response));
buf += sizeof(header.response);
if (klen > 0) {
memcpy(buf, key, klen);
buf += klen;
if (vlen > 0) {
memcpy(buf, val, vlen);
}
}
c->stats.offset += sizeof(header.response) + bodylen;
}
static void append_ascii_stats(const char *key, const uint16_t klen,
const char *val, const uint32_t vlen,
conn *c) {
char *pos = c->stats.buffer + c->stats.offset;
uint32_t nbytes = 0;
int remaining = c->stats.size - c->stats.offset;
int room = remaining - 1;
if (klen == 0 && vlen == 0) {
nbytes = snprintf(pos, room, "END\r\n");
} else if (vlen == 0) {
nbytes = snprintf(pos, room, "STAT %s\r\n", key);
} else {
nbytes = snprintf(pos, room, "STAT %s %s\r\n", key, val);
}
c->stats.offset += nbytes;
}
static bool grow_stats_buf(conn *c, size_t needed) {
size_t nsize = c->stats.size;
size_t available = nsize - c->stats.offset;
bool rv = true;
/* Special case: No buffer -- need to allocate fresh */
if (c->stats.buffer == NULL) {
nsize = 1024;
available = c->stats.size = c->stats.offset = 0;
}
while (needed > available) {
assert(nsize > 0);
nsize = nsize << 1;
available = nsize - c->stats.offset;
}
if (nsize != c->stats.size) {
char *ptr = realloc(c->stats.buffer, nsize);
if (ptr) {
c->stats.buffer = ptr;
c->stats.size = nsize;
} else {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
rv = false;
}
}
return rv;
}
static void append_stats(const char *key, const uint16_t klen,
const char *val, const uint32_t vlen,
const void *cookie)
{
/* value without a key is invalid */
if (klen == 0 && vlen > 0) {
return;
}
conn *c = (conn*)cookie;
if (c->protocol == binary_prot) {
size_t needed = vlen + klen + sizeof(protocol_binary_response_header);
if (!grow_stats_buf(c, needed)) {
return;
}
append_bin_stats(key, klen, val, vlen, c);
} else {
size_t needed = vlen + klen + 10; // 10 == "STAT = \r\n"
if (!grow_stats_buf(c, needed)) {
return;
}
append_ascii_stats(key, klen, val, vlen, c);
}
assert(c->stats.offset <= c->stats.size);
}
static void process_bin_stat(conn *c) {
char *subcommand = binary_get_key(c);
size_t nkey = c->binary_header.request.keylen;
if (settings.verbose > 1) {
int ii;
fprintf(stderr, "<%d STATS ", c->sfd);
for (ii = 0; ii < nkey; ++ii) {
fprintf(stderr, "%c", subcommand[ii]);
}
fprintf(stderr, "\n");
}
if (nkey == 0) {
/* request all statistics */
server_stats(&append_stats, c);
(void)get_stats(NULL, 0, &append_stats, c);
} else if (strncmp(subcommand, "reset", 5) == 0) {
stats_reset();
} else if (strncmp(subcommand, "settings", 8) == 0) {
process_stat_settings(&append_stats, c);
} else if (strncmp(subcommand, "detail", 6) == 0) {
char *subcmd_pos = subcommand + 6;
if (strncmp(subcmd_pos, " dump", 5) == 0) {
int len;
char *dump_buf = stats_prefix_dump(&len);
if (dump_buf == NULL || len <= 0) {
out_of_memory(c, "SERVER_ERROR Out of memory generating stats");
if (dump_buf != NULL)
free(dump_buf);
return;
} else {
append_stats("detailed", strlen("detailed"), dump_buf, len, c);
free(dump_buf);
}
} else if (strncmp(subcmd_pos, " on", 3) == 0) {
settings.detail_enabled = 1;
} else if (strncmp(subcmd_pos, " off", 4) == 0) {
settings.detail_enabled = 0;
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
return;
}
} else {
if (get_stats(subcommand, nkey, &append_stats, c)) {
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR Out of memory generating stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
}
return;
}
/* Append termination package and start the transfer */
append_stats(NULL, 0, NULL, 0, c);
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR Out of memory preparing to send stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
}
/* Just write an error message and disconnect the client */
static void handle_binary_protocol_error(conn *c) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, 0);
if (settings.verbose) {
fprintf(stderr, "Protocol error (opcode %02x), close connection %d\n",
c->binary_header.request.opcode, c->sfd);
}
c->close_after_write = true;
}
static void init_sasl_conn(conn *c) {
assert(c);
/* should something else be returned? */
if (!settings.sasl)
return;
c->authenticated = false;
if (!c->sasl_conn) {
int result=sasl_server_new("memcached",
NULL,
my_sasl_hostname[0] ? my_sasl_hostname : NULL,
NULL, NULL,
NULL, 0, &c->sasl_conn);
if (result != SASL_OK) {
if (settings.verbose) {
fprintf(stderr, "Failed to initialize SASL conn.\n");
}
c->sasl_conn = NULL;
}
}
}
static void bin_list_sasl_mechs(conn *c) {
// Guard against a disabled SASL.
if (!settings.sasl) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL,
c->binary_header.request.bodylen
- c->binary_header.request.keylen);
return;
}
init_sasl_conn(c);
const char *result_string = NULL;
unsigned int string_length = 0;
int result=sasl_listmech(c->sasl_conn, NULL,
"", /* What to prepend the string with */
" ", /* What to separate mechanisms with */
"", /* What to append to the string */
&result_string, &string_length,
NULL);
if (result != SASL_OK) {
/* Perhaps there's a better error for this... */
if (settings.verbose) {
fprintf(stderr, "Failed to list SASL mechanisms.\n");
}
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
return;
}
write_bin_response(c, (char*)result_string, 0, 0, string_length);
}
static void process_bin_sasl_auth(conn *c) {
// Guard for handling disabled SASL on the server.
if (!settings.sasl) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL,
c->binary_header.request.bodylen
- c->binary_header.request.keylen);
return;
}
assert(c->binary_header.request.extlen == 0);
int nkey = c->binary_header.request.keylen;
int vlen = c->binary_header.request.bodylen - nkey;
if (nkey > MAX_SASL_MECH_LEN) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, vlen);
conn_set_state(c, conn_swallow);
return;
}
char *key = binary_get_key(c);
assert(key);
item *it = item_alloc(key, nkey, 0, 0, vlen+2);
/* Can't use a chunked item for SASL authentication. */
if (it == 0 || (it->it_flags & ITEM_CHUNKED)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, NULL, vlen);
conn_set_state(c, conn_swallow);
if (it) {
do_item_remove(it);
}
return;
}
c->item = it;
c->ritem = ITEM_data(it);
c->rlbytes = vlen;
conn_set_state(c, conn_nread);
c->substate = bin_reading_sasl_auth_data;
}
static void process_bin_complete_sasl_auth(conn *c) {
assert(settings.sasl);
const char *out = NULL;
unsigned int outlen = 0;
assert(c->item);
init_sasl_conn(c);
int nkey = c->binary_header.request.keylen;
int vlen = c->binary_header.request.bodylen - nkey;
if (nkey > ((item*) c->item)->nkey) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, vlen);
conn_set_state(c, conn_swallow);
return;
}
char mech[nkey+1];
memcpy(mech, ITEM_key((item*)c->item), nkey);
mech[nkey] = 0x00;
if (settings.verbose)
fprintf(stderr, "mech: ``%s'' with %d bytes of data\n", mech, vlen);
const char *challenge = vlen == 0 ? NULL : ITEM_data((item*) c->item);
if (vlen > ((item*) c->item)->nbytes) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, NULL, vlen);
conn_set_state(c, conn_swallow);
return;
}
int result=-1;
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_SASL_AUTH:
result = sasl_server_start(c->sasl_conn, mech,
challenge, vlen,
&out, &outlen);
c->sasl_started = (result == SASL_OK || result == SASL_CONTINUE);
break;
case PROTOCOL_BINARY_CMD_SASL_STEP:
if (!c->sasl_started) {
if (settings.verbose) {
fprintf(stderr, "%d: SASL_STEP called but sasl_server_start "
"not called for this connection!\n", c->sfd);
}
break;
}
result = sasl_server_step(c->sasl_conn,
challenge, vlen,
&out, &outlen);
break;
default:
assert(false); /* CMD should be one of the above */
/* This code is pretty much impossible, but makes the compiler
happier */
if (settings.verbose) {
fprintf(stderr, "Unhandled command %d with challenge %s\n",
c->cmd, challenge);
}
break;
}
if (settings.verbose) {
fprintf(stderr, "sasl result code: %d\n", result);
}
switch(result) {
case SASL_OK:
c->authenticated = true;
write_bin_response(c, "Authenticated", 0, 0, strlen("Authenticated"));
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
break;
case SASL_CONTINUE:
add_bin_header(c, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE, 0, 0, outlen);
if (outlen > 0) {
resp_add_iov(c->resp, out, outlen);
}
// Immediately flush our write.
conn_set_state(c, conn_mwrite);
break;
default:
if (settings.verbose)
fprintf(stderr, "Unknown sasl response: %d\n", result);
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
c->thread->stats.auth_errors++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
}
static bool authenticated(conn *c) {
assert(settings.sasl);
bool rv = false;
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_SASL_AUTH: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_SASL_STEP: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_VERSION: /* FALLTHROUGH */
rv = true;
break;
default:
rv = c->authenticated;
}
if (settings.verbose > 1) {
fprintf(stderr, "authenticated() in cmd 0x%02x is %s\n",
c->cmd, rv ? "true" : "false");
}
return rv;
}
static void dispatch_bin_command(conn *c, char *extbuf) {
int protocol_error = 0;
uint8_t extlen = c->binary_header.request.extlen;
uint16_t keylen = c->binary_header.request.keylen;
uint32_t bodylen = c->binary_header.request.bodylen;
if (keylen > bodylen || keylen + extlen > bodylen) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL, 0);
c->close_after_write = true;
return;
}
if (settings.sasl && !authenticated(c)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
c->close_after_write = true;
return;
}
MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
c->noreply = true;
/* binprot supports 16bit keys, but internals are still 8bit */
if (keylen > KEY_MAX_LENGTH) {
handle_binary_protocol_error(c);
return;
}
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_SETQ:
c->cmd = PROTOCOL_BINARY_CMD_SET;
break;
case PROTOCOL_BINARY_CMD_ADDQ:
c->cmd = PROTOCOL_BINARY_CMD_ADD;
break;
case PROTOCOL_BINARY_CMD_REPLACEQ:
c->cmd = PROTOCOL_BINARY_CMD_REPLACE;
break;
case PROTOCOL_BINARY_CMD_DELETEQ:
c->cmd = PROTOCOL_BINARY_CMD_DELETE;
break;
case PROTOCOL_BINARY_CMD_INCREMENTQ:
c->cmd = PROTOCOL_BINARY_CMD_INCREMENT;
break;
case PROTOCOL_BINARY_CMD_DECREMENTQ:
c->cmd = PROTOCOL_BINARY_CMD_DECREMENT;
break;
case PROTOCOL_BINARY_CMD_QUITQ:
c->cmd = PROTOCOL_BINARY_CMD_QUIT;
break;
case PROTOCOL_BINARY_CMD_FLUSHQ:
c->cmd = PROTOCOL_BINARY_CMD_FLUSH;
break;
case PROTOCOL_BINARY_CMD_APPENDQ:
c->cmd = PROTOCOL_BINARY_CMD_APPEND;
break;
case PROTOCOL_BINARY_CMD_PREPENDQ:
c->cmd = PROTOCOL_BINARY_CMD_PREPEND;
break;
case PROTOCOL_BINARY_CMD_GETQ:
c->cmd = PROTOCOL_BINARY_CMD_GET;
break;
case PROTOCOL_BINARY_CMD_GETKQ:
c->cmd = PROTOCOL_BINARY_CMD_GETK;
break;
case PROTOCOL_BINARY_CMD_GATQ:
c->cmd = PROTOCOL_BINARY_CMD_GAT;
break;
case PROTOCOL_BINARY_CMD_GATKQ:
c->cmd = PROTOCOL_BINARY_CMD_GATK;
break;
default:
c->noreply = false;
}
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_VERSION:
if (extlen == 0 && keylen == 0 && bodylen == 0) {
write_bin_response(c, VERSION, 0, 0, strlen(VERSION));
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_FLUSH:
if (keylen == 0 && bodylen == extlen && (extlen == 0 || extlen == 4)) {
process_bin_flush(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_NOOP:
if (extlen == 0 && keylen == 0 && bodylen == 0) {
write_bin_response(c, NULL, 0, 0, 0);
// NOOP forces pipeline flush.
conn_set_state(c, conn_mwrite);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_SET: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_ADD: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_REPLACE:
if (extlen == 8 && keylen != 0 && bodylen >= (keylen + 8)) {
process_bin_update(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_GETQ: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_GET: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_GETKQ: /* FALLTHROUGH */
case PROTOCOL_BINARY_CMD_GETK:
if (extlen == 0 && bodylen == keylen && keylen > 0) {
process_bin_get_or_touch(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_DELETE:
if (keylen > 0 && extlen == 0 && bodylen == keylen) {
process_bin_delete(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_INCREMENT:
case PROTOCOL_BINARY_CMD_DECREMENT:
if (keylen > 0 && extlen == 20 && bodylen == (keylen + extlen)) {
complete_incr_bin(c, extbuf);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_APPEND:
case PROTOCOL_BINARY_CMD_PREPEND:
if (keylen > 0 && extlen == 0) {
process_bin_append_prepend(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_STAT:
if (extlen == 0) {
process_bin_stat(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_QUIT:
if (keylen == 0 && extlen == 0 && bodylen == 0) {
write_bin_response(c, NULL, 0, 0, 0);
conn_set_state(c, conn_mwrite);
c->close_after_write = true;
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS:
if (extlen == 0 && keylen == 0 && bodylen == 0) {
bin_list_sasl_mechs(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_SASL_AUTH:
case PROTOCOL_BINARY_CMD_SASL_STEP:
if (extlen == 0 && keylen != 0) {
process_bin_sasl_auth(c);
} else {
protocol_error = 1;
}
break;
case PROTOCOL_BINARY_CMD_TOUCH:
case PROTOCOL_BINARY_CMD_GAT:
case PROTOCOL_BINARY_CMD_GATQ:
case PROTOCOL_BINARY_CMD_GATK:
case PROTOCOL_BINARY_CMD_GATKQ:
if (extlen == 4 && keylen != 0) {
process_bin_get_or_touch(c, extbuf);
} else {
protocol_error = 1;
}
break;
default:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, NULL,
bodylen);
}
if (protocol_error)
handle_binary_protocol_error(c);
}
static void process_bin_update(conn *c, char *extbuf) {
char *key;
int nkey;
int vlen;
item *it;
protocol_binary_request_set* req = (void *)extbuf;
assert(c != NULL);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
/* fix byteorder in the request */
req->message.body.flags = ntohl(req->message.body.flags);
req->message.body.expiration = ntohl(req->message.body.expiration);
vlen = c->binary_header.request.bodylen - (nkey + c->binary_header.request.extlen);
if (settings.verbose > 1) {
int ii;
if (c->cmd == PROTOCOL_BINARY_CMD_ADD) {
fprintf(stderr, "<%d ADD ", c->sfd);
} else if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
fprintf(stderr, "<%d SET ", c->sfd);
} else {
fprintf(stderr, "<%d REPLACE ", c->sfd);
}
for (ii = 0; ii < nkey; ++ii) {
fprintf(stderr, "%c", key[ii]);
}
fprintf(stderr, " Value len is %d", vlen);
fprintf(stderr, "\n");
}
if (settings.detail_enabled) {
stats_prefix_record_set(key, nkey);
}
it = item_alloc(key, nkey, req->message.body.flags,
realtime(req->message.body.expiration), vlen+2);
if (it == 0) {
enum store_item_type status;
if (! item_size_ok(nkey, req->message.body.flags, vlen + 2)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, NULL, vlen);
status = TOO_LARGE;
} else {
out_of_memory(c, "SERVER_ERROR Out of memory allocating item");
/* This error generating method eats the swallow value. Add here. */
c->sbytes = vlen;
status = NO_MEMORY;
}
/* FIXME: losing c->cmd since it's translated below. refactor? */
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
NULL, status, 0, key, nkey, req->message.body.expiration,
ITEM_clsid(it), c->sfd);
/* Avoid stale data persisting in cache because we failed alloc.
* Unacceptable for SET. Anywhere else too? */
if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
it = item_get(key, nkey, c, DONT_UPDATE);
if (it) {
item_unlink(it);
STORAGE_delete(c->thread->storage, it);
item_remove(it);
}
}
/* swallow the data line */
conn_set_state(c, conn_swallow);
return;
}
ITEM_set_cas(it, c->binary_header.request.cas);
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_ADD:
c->cmd = NREAD_ADD;
break;
case PROTOCOL_BINARY_CMD_SET:
c->cmd = NREAD_SET;
break;
case PROTOCOL_BINARY_CMD_REPLACE:
c->cmd = NREAD_REPLACE;
break;
default:
assert(0);
}
if (ITEM_get_cas(it) != 0) {
c->cmd = NREAD_CAS;
}
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = vlen;
conn_set_state(c, conn_nread);
c->substate = bin_read_set_value;
}
static void process_bin_append_prepend(conn *c) {
char *key;
int nkey;
int vlen;
item *it;
assert(c != NULL);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
vlen = c->binary_header.request.bodylen - nkey;
if (settings.verbose > 1) {
fprintf(stderr, "Value len is %d\n", vlen);
}
if (settings.detail_enabled) {
stats_prefix_record_set(key, nkey);
}
it = item_alloc(key, nkey, 0, 0, vlen+2);
if (it == 0) {
if (! item_size_ok(nkey, 0, vlen + 2)) {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, NULL, vlen);
} else {
out_of_memory(c, "SERVER_ERROR Out of memory allocating item");
/* OOM calls eat the swallow value. Add here. */
c->sbytes = vlen;
}
/* swallow the data line */
conn_set_state(c, conn_swallow);
return;
}
ITEM_set_cas(it, c->binary_header.request.cas);
switch (c->cmd) {
case PROTOCOL_BINARY_CMD_APPEND:
c->cmd = NREAD_APPEND;
break;
case PROTOCOL_BINARY_CMD_PREPEND:
c->cmd = NREAD_PREPEND;
break;
default:
assert(0);
}
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = vlen;
conn_set_state(c, conn_nread);
c->substate = bin_read_set_value;
}
static void process_bin_flush(conn *c, char *extbuf) {
time_t exptime = 0;
protocol_binary_request_flush* req = (void *)extbuf;
rel_time_t new_oldest = 0;
if (!settings.flush_enabled) {
// flush_all is not allowed but we log it on stats
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, NULL, 0);
return;
}
if (c->binary_header.request.extlen == sizeof(req->message.body)) {
exptime = ntohl(req->message.body.expiration);
}
if (exptime > 0) {
new_oldest = realtime(exptime);
} else {
new_oldest = current_time;
}
if (settings.use_cas) {
settings.oldest_live = new_oldest - 1;
if (settings.oldest_live <= current_time)
settings.oldest_cas = get_cas_id();
} else {
settings.oldest_live = new_oldest;
}
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.flush_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
write_bin_response(c, NULL, 0, 0, 0);
}
static void process_bin_delete(conn *c) {
item *it;
uint32_t hv;
char* key = binary_get_key(c);
size_t nkey = c->binary_header.request.keylen;
assert(c != NULL);
if (settings.verbose > 1) {
int ii;
fprintf(stderr, "Deleting ");
for (ii = 0; ii < nkey; ++ii) {
fprintf(stderr, "%c", key[ii]);
}
fprintf(stderr, "\n");
}
if (settings.detail_enabled) {
stats_prefix_record_delete(key, nkey);
}
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
uint64_t cas = c->binary_header.request.cas;
if (cas == 0 || cas == ITEM_get_cas(it)) {
MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].delete_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
write_bin_response(c, NULL, 0, 0, 0);
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
}
do_item_remove(it); /* release our reference */
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
item_unlock(hv);
}
static void complete_nread_binary(conn *c) {
assert(c != NULL);
assert(c->cmd >= 0);
switch(c->substate) {
case bin_read_set_value:
complete_update_bin(c);
break;
case bin_reading_sasl_auth_data:
process_bin_complete_sasl_auth(c);
if (c->item) {
do_item_remove(c->item);
c->item = NULL;
}
break;
default:
fprintf(stderr, "Not handling substate %d\n", c->substate);
assert(0);
}
}
static void reset_cmd_handler(conn *c) {
c->cmd = -1;
c->substate = bin_no_state;
if (c->item != NULL) {
// TODO: Any other way to get here?
// SASL auth was mistakenly using it. Nothing else should?
item_remove(c->item);
c->item = NULL;
}
if (c->rbytes > 0) {
conn_set_state(c, conn_parse_cmd);
} else if (c->resp_head) {
conn_set_state(c, conn_mwrite);
} else {
conn_set_state(c, conn_waiting);
}
}
static void complete_nread(conn *c) {
assert(c != NULL);
assert(c->protocol == ascii_prot
|| c->protocol == binary_prot);
if (c->protocol == ascii_prot) {
complete_nread_ascii(c);
} else if (c->protocol == binary_prot) {
complete_nread_binary(c);
}
}
/* Destination must always be chunked */
/* This should be part of item.c */
static int _store_item_copy_chunks(item *d_it, item *s_it, const int len) {
item_chunk *dch = (item_chunk *) ITEM_schunk(d_it);
/* Advance dch until we find free space */
while (dch->size == dch->used) {
if (dch->next) {
dch = dch->next;
} else {
break;
}
}
if (s_it->it_flags & ITEM_CHUNKED) {
int remain = len;
item_chunk *sch = (item_chunk *) ITEM_schunk(s_it);
int copied = 0;
/* Fills dch's to capacity, not straight copy sch in case data is
* being added or removed (ie append/prepend)
*/
while (sch && dch && remain) {
assert(dch->used <= dch->size);
int todo = (dch->size - dch->used < sch->used - copied)
? dch->size - dch->used : sch->used - copied;
if (remain < todo)
todo = remain;
memcpy(dch->data + dch->used, sch->data + copied, todo);
dch->used += todo;
copied += todo;
remain -= todo;
assert(dch->used <= dch->size);
if (dch->size == dch->used) {
item_chunk *tch = do_item_alloc_chunk(dch, remain);
if (tch) {
dch = tch;
} else {
return -1;
}
}
assert(copied <= sch->used);
if (copied == sch->used) {
copied = 0;
sch = sch->next;
}
}
/* assert that the destination had enough space for the source */
assert(remain == 0);
} else {
int done = 0;
/* Fill dch's via a non-chunked item. */
while (len > done && dch) {
int todo = (dch->size - dch->used < len - done)
? dch->size - dch->used : len - done;
//assert(dch->size - dch->used != 0);
memcpy(dch->data + dch->used, ITEM_data(s_it) + done, todo);
done += todo;
dch->used += todo;
assert(dch->used <= dch->size);
if (dch->size == dch->used) {
item_chunk *tch = do_item_alloc_chunk(dch, len - done);
if (tch) {
dch = tch;
} else {
return -1;
}
}
}
assert(len == done);
}
return 0;
}
static int _store_item_copy_data(int comm, item *old_it, item *new_it, item *add_it) {
if (comm == NREAD_APPEND) {
if (new_it->it_flags & ITEM_CHUNKED) {
if (_store_item_copy_chunks(new_it, old_it, old_it->nbytes - 2) == -1 ||
_store_item_copy_chunks(new_it, add_it, add_it->nbytes) == -1) {
return -1;
}
} else {
memcpy(ITEM_data(new_it), ITEM_data(old_it), old_it->nbytes);
memcpy(ITEM_data(new_it) + old_it->nbytes - 2 /* CRLF */, ITEM_data(add_it), add_it->nbytes);
}
} else {
/* NREAD_PREPEND */
if (new_it->it_flags & ITEM_CHUNKED) {
if (_store_item_copy_chunks(new_it, add_it, add_it->nbytes - 2) == -1 ||
_store_item_copy_chunks(new_it, old_it, old_it->nbytes) == -1) {
return -1;
}
} else {
memcpy(ITEM_data(new_it), ITEM_data(add_it), add_it->nbytes);
memcpy(ITEM_data(new_it) + add_it->nbytes - 2 /* CRLF */, ITEM_data(old_it), old_it->nbytes);
}
}
return 0;
}
/*
* Stores an item in the cache according to the semantics of one of the set
* commands. Protected by the item lock.
*
* Returns the state of storage.
*/
enum store_item_type do_store_item(item *it, int comm, conn *c, const uint32_t hv) {
char *key = ITEM_key(it);
item *old_it = do_item_get(key, it->nkey, hv, c, DONT_UPDATE);
enum store_item_type stored = NOT_STORED;
enum cas_result { CAS_NONE, CAS_MATCH, CAS_BADVAL, CAS_STALE, CAS_MISS };
item *new_it = NULL;
uint32_t flags;
/* Do the CAS test up front so we can apply to all store modes */
enum cas_result cas_res = CAS_NONE;
bool do_store = false;
if (old_it != NULL) {
// Most of the CAS work requires something to compare to.
uint64_t it_cas = ITEM_get_cas(it);
uint64_t old_cas = ITEM_get_cas(old_it);
if (it_cas == 0) {
cas_res = CAS_NONE;
} else if (it_cas == old_cas) {
cas_res = CAS_MATCH;
} else if (c->set_stale && it_cas < old_cas) {
cas_res = CAS_STALE;
} else {
cas_res = CAS_BADVAL;
}
switch (comm) {
case NREAD_ADD:
/* add only adds a nonexistent item, but promote to head of LRU */
do_item_update(old_it);
break;
case NREAD_CAS:
if (cas_res == CAS_MATCH) {
// cas validates
// it and old_it may belong to different classes.
// I'm updating the stats for the one that's getting pushed out
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_store = true;
} else if (cas_res == CAS_STALE) {
// if we're allowed to set a stale value, CAS must be lower than
// the current item's CAS.
// This replaces the value, but should preserve TTL, and stale
// item marker bit + token sent if exists.
it->exptime = old_it->exptime;
it->it_flags |= ITEM_STALE;
if (old_it->it_flags & ITEM_TOKEN_SENT) {
it->it_flags |= ITEM_TOKEN_SENT;
}
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_store = true;
} else {
// NONE or BADVAL are the same for CAS cmd
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_badval++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (settings.verbose > 1) {
fprintf(stderr, "CAS: failure: expected %llu, got %llu\n",
(unsigned long long)ITEM_get_cas(old_it),
(unsigned long long)ITEM_get_cas(it));
}
stored = EXISTS;
}
break;
case NREAD_APPEND:
case NREAD_PREPEND:
if (cas_res != CAS_NONE && cas_res != CAS_MATCH) {
stored = EXISTS;
break;
}
#ifdef EXTSTORE
if ((old_it->it_flags & ITEM_HDR) != 0) {
/* block append/prepend from working with extstore-d items.
* leave response code to NOT_STORED default */
break;
}
#endif
/* we have it and old_it here - alloc memory to hold both */
FLAGS_CONV(old_it, flags);
new_it = do_item_alloc(key, it->nkey, flags, old_it->exptime, it->nbytes + old_it->nbytes - 2 /* CRLF */);
// OOM trying to copy.
if (new_it == NULL)
break;
/* copy data from it and old_it to new_it */
if (_store_item_copy_data(comm, old_it, new_it, it) == -1) {
// failed data copy
break;
} else {
// refcount of new_it is 1 here. will end up 2 after link.
// it's original ref is managed outside of this function
it = new_it;
do_store = true;
}
break;
case NREAD_REPLACE:
case NREAD_SET:
do_store = true;
break;
}
if (do_store) {
STORAGE_delete(c->thread->storage, old_it);
item_replace(old_it, it, hv);
stored = STORED;
}
do_item_remove(old_it); /* release our reference */
if (new_it != NULL) {
// append/prepend end up with an extra reference for new_it.
do_item_remove(new_it);
}
} else {
/* No pre-existing item to replace or compare to. */
if (ITEM_get_cas(it) != 0) {
/* Asked for a CAS match but nothing to compare it to. */
cas_res = CAS_MISS;
}
switch (comm) {
case NREAD_ADD:
case NREAD_SET:
do_store = true;
break;
case NREAD_CAS:
// LRU expired
stored = NOT_FOUND;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.cas_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
break;
case NREAD_REPLACE:
case NREAD_APPEND:
case NREAD_PREPEND:
/* Requires an existing item. */
break;
}
if (do_store) {
do_item_link(it, hv);
stored = STORED;
}
}
if (stored == STORED) {
c->cas = ITEM_get_cas(it);
}
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE, NULL,
stored, comm, ITEM_key(it), it->nkey, it->exptime, ITEM_clsid(it), c->sfd);
return stored;
}
typedef struct token_s {
char *value;
size_t length;
} token_t;
#define COMMAND_TOKEN 0
#define SUBCOMMAND_TOKEN 1
#define KEY_TOKEN 1
#define MAX_TOKENS 24
/*
* Tokenize the command string by replacing whitespace with '\0' and update
* the token array tokens with pointer to start of each token and length.
* Returns total number of tokens. The last valid token is the terminal
* token (value points to the first unprocessed character of the string and
* length zero).
*
* Usage example:
*
* while(tokenize_command(command, ncommand, tokens, max_tokens) > 0) {
* for(int ix = 0; tokens[ix].length != 0; ix++) {
* ...
* }
* ncommand = tokens[ix].value - command;
* command = tokens[ix].value;
* }
*/
static size_t tokenize_command(char *command, token_t *tokens, const size_t max_tokens) {
char *s, *e;
size_t ntokens = 0;
size_t len = strlen(command);
unsigned int i = 0;
assert(command != NULL && tokens != NULL && max_tokens > 1);
s = e = command;
for (i = 0; i < len; i++) {
if (*e == ' ') {
if (s != e) {
tokens[ntokens].value = s;
tokens[ntokens].length = e - s;
ntokens++;
*e = '\0';
if (ntokens == max_tokens - 1) {
e++;
s = e; /* so we don't add an extra token */
break;
}
}
s = e + 1;
}
e++;
}
if (s != e) {
tokens[ntokens].value = s;
tokens[ntokens].length = e - s;
ntokens++;
}
/*
* If we scanned the whole string, the terminal value pointer is null,
* otherwise it is the first unprocessed character.
*/
tokens[ntokens].value = *e == '\0' ? NULL : e;
tokens[ntokens].length = 0;
ntokens++;
return ntokens;
}
/* set up a connection to write a buffer then free it, used for stats */
static void write_and_free(conn *c, char *buf, int bytes) {
if (buf) {
mc_resp *resp = c->resp;
resp->write_and_free = buf;
resp_add_iov(resp, buf, bytes);
conn_set_state(c, conn_new_cmd);
} else {
out_of_memory(c, "SERVER_ERROR out of memory writing stats");
}
}
static inline bool set_noreply_maybe(conn *c, token_t *tokens, size_t ntokens)
{
int noreply_index = ntokens - 2;
/*
NOTE: this function is not the first place where we are going to
send the reply. We could send it instead from process_command()
if the request line has wrong number of tokens. However parsing
malformed line for "noreply" option is not reliable anyway, so
it can't be helped.
*/
if (tokens[noreply_index].value
&& strcmp(tokens[noreply_index].value, "noreply") == 0) {
c->noreply = true;
}
return c->noreply;
}
void append_stat(const char *name, ADD_STAT add_stats, conn *c,
const char *fmt, ...) {
char val_str[STAT_VAL_LEN];
int vlen;
va_list ap;
assert(name);
assert(add_stats);
assert(c);
assert(fmt);
va_start(ap, fmt);
vlen = vsnprintf(val_str, sizeof(val_str) - 1, fmt, ap);
va_end(ap);
add_stats(name, strlen(name), val_str, vlen, c);
}
inline static void process_stats_detail(conn *c, const char *command) {
assert(c != NULL);
if (strcmp(command, "on") == 0) {
settings.detail_enabled = 1;
out_string(c, "OK");
}
else if (strcmp(command, "off") == 0) {
settings.detail_enabled = 0;
out_string(c, "OK");
}
else if (strcmp(command, "dump") == 0) {
int len;
char *stats = stats_prefix_dump(&len);
write_and_free(c, stats, len);
}
else {
out_string(c, "CLIENT_ERROR usage: stats detail on|off|dump");
}
}
/* return server specific stats only */
static void server_stats(ADD_STAT add_stats, conn *c) {
pid_t pid = getpid();
rel_time_t now = current_time;
struct thread_stats thread_stats;
threadlocal_stats_aggregate(&thread_stats);
struct slab_stats slab_stats;
slab_stats_aggregate(&thread_stats, &slab_stats);
#ifdef EXTSTORE
struct extstore_stats st;
#endif
#ifndef WIN32
struct rusage usage;
getrusage(RUSAGE_SELF, &usage);
#endif /* !WIN32 */
STATS_LOCK();
APPEND_STAT("pid", "%lu", (long)pid);
APPEND_STAT("uptime", "%u", now - ITEM_UPDATE_INTERVAL);
APPEND_STAT("time", "%ld", now + (long)process_started);
APPEND_STAT("version", "%s", VERSION);
APPEND_STAT("libevent", "%s", event_get_version());
APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *)));
#ifndef WIN32
append_stat("rusage_user", add_stats, c, "%ld.%06ld",
(long)usage.ru_utime.tv_sec,
(long)usage.ru_utime.tv_usec);
append_stat("rusage_system", add_stats, c, "%ld.%06ld",
(long)usage.ru_stime.tv_sec,
(long)usage.ru_stime.tv_usec);
#endif /* !WIN32 */
APPEND_STAT("max_connections", "%d", settings.maxconns);
APPEND_STAT("curr_connections", "%llu", (unsigned long long)stats_state.curr_conns - 1);
APPEND_STAT("total_connections", "%llu", (unsigned long long)stats.total_conns);
if (settings.maxconns_fast) {
APPEND_STAT("rejected_connections", "%llu", (unsigned long long)stats.rejected_conns);
}
APPEND_STAT("connection_structures", "%u", stats_state.conn_structs);
APPEND_STAT("response_obj_bytes", "%llu", (unsigned long long)thread_stats.response_obj_bytes);
APPEND_STAT("response_obj_total", "%llu", (unsigned long long)thread_stats.response_obj_total);
APPEND_STAT("response_obj_free", "%llu", (unsigned long long)thread_stats.response_obj_free);
APPEND_STAT("response_obj_oom", "%llu", (unsigned long long)thread_stats.response_obj_oom);
APPEND_STAT("read_buf_bytes", "%llu", (unsigned long long)thread_stats.read_buf_bytes);
APPEND_STAT("read_buf_bytes_free", "%llu", (unsigned long long)thread_stats.read_buf_bytes_free);
APPEND_STAT("read_buf_oom", "%llu", (unsigned long long)thread_stats.read_buf_oom);
APPEND_STAT("reserved_fds", "%u", stats_state.reserved_fds);
APPEND_STAT("cmd_get", "%llu", (unsigned long long)thread_stats.get_cmds);
APPEND_STAT("cmd_set", "%llu", (unsigned long long)slab_stats.set_cmds);
APPEND_STAT("cmd_flush", "%llu", (unsigned long long)thread_stats.flush_cmds);
APPEND_STAT("cmd_touch", "%llu", (unsigned long long)thread_stats.touch_cmds);
APPEND_STAT("cmd_meta", "%llu", (unsigned long long)thread_stats.meta_cmds);
APPEND_STAT("get_hits", "%llu", (unsigned long long)slab_stats.get_hits);
APPEND_STAT("get_misses", "%llu", (unsigned long long)thread_stats.get_misses);
APPEND_STAT("get_expired", "%llu", (unsigned long long)thread_stats.get_expired);
APPEND_STAT("get_flushed", "%llu", (unsigned long long)thread_stats.get_flushed);
#ifdef EXTSTORE
if (c->thread->storage) {
APPEND_STAT("get_extstore", "%llu", (unsigned long long)thread_stats.get_extstore);
APPEND_STAT("get_aborted_extstore", "%llu", (unsigned long long)thread_stats.get_aborted_extstore);
APPEND_STAT("get_oom_extstore", "%llu", (unsigned long long)thread_stats.get_oom_extstore);
APPEND_STAT("recache_from_extstore", "%llu", (unsigned long long)thread_stats.recache_from_extstore);
APPEND_STAT("miss_from_extstore", "%llu", (unsigned long long)thread_stats.miss_from_extstore);
APPEND_STAT("badcrc_from_extstore", "%llu", (unsigned long long)thread_stats.badcrc_from_extstore);
}
#endif
APPEND_STAT("delete_misses", "%llu", (unsigned long long)thread_stats.delete_misses);
APPEND_STAT("delete_hits", "%llu", (unsigned long long)slab_stats.delete_hits);
APPEND_STAT("incr_misses", "%llu", (unsigned long long)thread_stats.incr_misses);
APPEND_STAT("incr_hits", "%llu", (unsigned long long)slab_stats.incr_hits);
APPEND_STAT("decr_misses", "%llu", (unsigned long long)thread_stats.decr_misses);
APPEND_STAT("decr_hits", "%llu", (unsigned long long)slab_stats.decr_hits);
APPEND_STAT("cas_misses", "%llu", (unsigned long long)thread_stats.cas_misses);
APPEND_STAT("cas_hits", "%llu", (unsigned long long)slab_stats.cas_hits);
APPEND_STAT("cas_badval", "%llu", (unsigned long long)slab_stats.cas_badval);
APPEND_STAT("touch_hits", "%llu", (unsigned long long)slab_stats.touch_hits);
APPEND_STAT("touch_misses", "%llu", (unsigned long long)thread_stats.touch_misses);
APPEND_STAT("auth_cmds", "%llu", (unsigned long long)thread_stats.auth_cmds);
APPEND_STAT("auth_errors", "%llu", (unsigned long long)thread_stats.auth_errors);
if (settings.idle_timeout) {
APPEND_STAT("idle_kicks", "%llu", (unsigned long long)thread_stats.idle_kicks);
}
APPEND_STAT("bytes_read", "%llu", (unsigned long long)thread_stats.bytes_read);
APPEND_STAT("bytes_written", "%llu", (unsigned long long)thread_stats.bytes_written);
APPEND_STAT("limit_maxbytes", "%llu", (unsigned long long)settings.maxbytes);
APPEND_STAT("accepting_conns", "%u", stats_state.accepting_conns);
APPEND_STAT("listen_disabled_num", "%llu", (unsigned long long)stats.listen_disabled_num);
APPEND_STAT("time_in_listen_disabled_us", "%llu", stats.time_in_listen_disabled_us);
APPEND_STAT("threads", "%d", settings.num_threads);
APPEND_STAT("conn_yields", "%llu", (unsigned long long)thread_stats.conn_yields);
APPEND_STAT("hash_power_level", "%u", stats_state.hash_power_level);
APPEND_STAT("hash_bytes", "%llu", (unsigned long long)stats_state.hash_bytes);
APPEND_STAT("hash_is_expanding", "%u", stats_state.hash_is_expanding);
if (settings.slab_reassign) {
APPEND_STAT("slab_reassign_rescues", "%llu", stats.slab_reassign_rescues);
APPEND_STAT("slab_reassign_chunk_rescues", "%llu", stats.slab_reassign_chunk_rescues);
APPEND_STAT("slab_reassign_evictions_nomem", "%llu", stats.slab_reassign_evictions_nomem);
APPEND_STAT("slab_reassign_inline_reclaim", "%llu", stats.slab_reassign_inline_reclaim);
APPEND_STAT("slab_reassign_busy_items", "%llu", stats.slab_reassign_busy_items);
APPEND_STAT("slab_reassign_busy_deletes", "%llu", stats.slab_reassign_busy_deletes);
APPEND_STAT("slab_reassign_running", "%u", stats_state.slab_reassign_running);
APPEND_STAT("slabs_moved", "%llu", stats.slabs_moved);
}
if (settings.lru_crawler) {
APPEND_STAT("lru_crawler_running", "%u", stats_state.lru_crawler_running);
APPEND_STAT("lru_crawler_starts", "%u", stats.lru_crawler_starts);
}
if (settings.lru_maintainer_thread) {
APPEND_STAT("lru_maintainer_juggles", "%llu", (unsigned long long)stats.lru_maintainer_juggles);
}
APPEND_STAT("malloc_fails", "%llu",
(unsigned long long)stats.malloc_fails);
APPEND_STAT("log_worker_dropped", "%llu", (unsigned long long)stats.log_worker_dropped);
APPEND_STAT("log_worker_written", "%llu", (unsigned long long)stats.log_worker_written);
APPEND_STAT("log_watcher_skipped", "%llu", (unsigned long long)stats.log_watcher_skipped);
APPEND_STAT("log_watcher_sent", "%llu", (unsigned long long)stats.log_watcher_sent);
STATS_UNLOCK();
#ifdef EXTSTORE
if (c->thread->storage) {
STATS_LOCK();
APPEND_STAT("extstore_compact_lost", "%llu", (unsigned long long)stats.extstore_compact_lost);
APPEND_STAT("extstore_compact_rescues", "%llu", (unsigned long long)stats.extstore_compact_rescues);
APPEND_STAT("extstore_compact_skipped", "%llu", (unsigned long long)stats.extstore_compact_skipped);
STATS_UNLOCK();
extstore_get_stats(c->thread->storage, &st);
APPEND_STAT("extstore_page_allocs", "%llu", (unsigned long long)st.page_allocs);
APPEND_STAT("extstore_page_evictions", "%llu", (unsigned long long)st.page_evictions);
APPEND_STAT("extstore_page_reclaims", "%llu", (unsigned long long)st.page_reclaims);
APPEND_STAT("extstore_pages_free", "%llu", (unsigned long long)st.pages_free);
APPEND_STAT("extstore_pages_used", "%llu", (unsigned long long)st.pages_used);
APPEND_STAT("extstore_objects_evicted", "%llu", (unsigned long long)st.objects_evicted);
APPEND_STAT("extstore_objects_read", "%llu", (unsigned long long)st.objects_read);
APPEND_STAT("extstore_objects_written", "%llu", (unsigned long long)st.objects_written);
APPEND_STAT("extstore_objects_used", "%llu", (unsigned long long)st.objects_used);
APPEND_STAT("extstore_bytes_evicted", "%llu", (unsigned long long)st.bytes_evicted);
APPEND_STAT("extstore_bytes_written", "%llu", (unsigned long long)st.bytes_written);
APPEND_STAT("extstore_bytes_read", "%llu", (unsigned long long)st.bytes_read);
APPEND_STAT("extstore_bytes_used", "%llu", (unsigned long long)st.bytes_used);
APPEND_STAT("extstore_bytes_fragmented", "%llu", (unsigned long long)st.bytes_fragmented);
APPEND_STAT("extstore_limit_maxbytes", "%llu", (unsigned long long)(st.page_count * st.page_size));
APPEND_STAT("extstore_io_queue", "%llu", (unsigned long long)(st.io_queue));
}
#endif
#ifdef TLS
if (settings.ssl_enabled) {
APPEND_STAT("ssl_handshake_errors", "%llu", (unsigned long long)stats.ssl_handshake_errors);
APPEND_STAT("time_since_server_cert_refresh", "%u", now - settings.ssl_last_cert_refresh_time);
}
#endif
}
static void process_stat_settings(ADD_STAT add_stats, void *c) {
assert(add_stats);
APPEND_STAT("maxbytes", "%llu", (unsigned long long)settings.maxbytes);
APPEND_STAT("maxconns", "%d", settings.maxconns);
APPEND_STAT("tcpport", "%d", settings.port);
APPEND_STAT("udpport", "%d", settings.udpport);
APPEND_STAT("inter", "%s", settings.inter ? settings.inter : "NULL");
APPEND_STAT("verbosity", "%d", settings.verbose);
APPEND_STAT("oldest", "%lu", (unsigned long)settings.oldest_live);
APPEND_STAT("evictions", "%s", settings.evict_to_free ? "on" : "off");
APPEND_STAT("domain_socket", "%s",
settings.socketpath ? settings.socketpath : "NULL");
APPEND_STAT("umask", "%o", settings.access);
APPEND_STAT("growth_factor", "%.2f", settings.factor);
APPEND_STAT("chunk_size", "%d", settings.chunk_size);
APPEND_STAT("num_threads", "%d", settings.num_threads);
APPEND_STAT("num_threads_per_udp", "%d", settings.num_threads_per_udp);
APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter);
APPEND_STAT("detail_enabled", "%s",
settings.detail_enabled ? "yes" : "no");
APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event);
APPEND_STAT("cas_enabled", "%s", settings.use_cas ? "yes" : "no");
APPEND_STAT("tcp_backlog", "%d", settings.backlog);
APPEND_STAT("binding_protocol", "%s",
prot_text(settings.binding_protocol));
APPEND_STAT("auth_enabled_sasl", "%s", settings.sasl ? "yes" : "no");
APPEND_STAT("auth_enabled_ascii", "%s", settings.auth_file ? settings.auth_file : "no");
APPEND_STAT("item_size_max", "%d", settings.item_size_max);
APPEND_STAT("maxconns_fast", "%s", settings.maxconns_fast ? "yes" : "no");
APPEND_STAT("hashpower_init", "%d", settings.hashpower_init);
APPEND_STAT("slab_reassign", "%s", settings.slab_reassign ? "yes" : "no");
APPEND_STAT("slab_automove", "%d", settings.slab_automove);
APPEND_STAT("slab_automove_ratio", "%.2f", settings.slab_automove_ratio);
APPEND_STAT("slab_automove_window", "%u", settings.slab_automove_window);
APPEND_STAT("slab_chunk_max", "%d", settings.slab_chunk_size_max);
APPEND_STAT("lru_crawler", "%s", settings.lru_crawler ? "yes" : "no");
APPEND_STAT("lru_crawler_sleep", "%d", settings.lru_crawler_sleep);
APPEND_STAT("lru_crawler_tocrawl", "%lu", (unsigned long)settings.lru_crawler_tocrawl);
APPEND_STAT("tail_repair_time", "%d", settings.tail_repair_time);
APPEND_STAT("flush_enabled", "%s", settings.flush_enabled ? "yes" : "no");
APPEND_STAT("dump_enabled", "%s", settings.dump_enabled ? "yes" : "no");
APPEND_STAT("hash_algorithm", "%s", settings.hash_algorithm);
APPEND_STAT("lru_maintainer_thread", "%s", settings.lru_maintainer_thread ? "yes" : "no");
APPEND_STAT("lru_segmented", "%s", settings.lru_segmented ? "yes" : "no");
APPEND_STAT("hot_lru_pct", "%d", settings.hot_lru_pct);
APPEND_STAT("warm_lru_pct", "%d", settings.warm_lru_pct);
APPEND_STAT("hot_max_factor", "%.2f", settings.hot_max_factor);
APPEND_STAT("warm_max_factor", "%.2f", settings.warm_max_factor);
APPEND_STAT("temp_lru", "%s", settings.temp_lru ? "yes" : "no");
APPEND_STAT("temporary_ttl", "%u", settings.temporary_ttl);
APPEND_STAT("idle_timeout", "%d", settings.idle_timeout);
APPEND_STAT("watcher_logbuf_size", "%u", settings.logger_watcher_buf_size);
APPEND_STAT("worker_logbuf_size", "%u", settings.logger_buf_size);
APPEND_STAT("resp_obj_mem_limit", "%u", settings.resp_obj_mem_limit);
APPEND_STAT("read_buf_mem_limit", "%u", settings.read_buf_mem_limit);
APPEND_STAT("track_sizes", "%s", item_stats_sizes_status() ? "yes" : "no");
APPEND_STAT("inline_ascii_response", "%s", "no"); // setting is dead, cannot be yes.
#ifdef HAVE_DROP_PRIVILEGES
APPEND_STAT("drop_privileges", "%s", settings.drop_privileges ? "yes" : "no");
#endif
#ifdef EXTSTORE
APPEND_STAT("ext_item_size", "%u", settings.ext_item_size);
APPEND_STAT("ext_item_age", "%u", settings.ext_item_age);
APPEND_STAT("ext_low_ttl", "%u", settings.ext_low_ttl);
APPEND_STAT("ext_recache_rate", "%u", settings.ext_recache_rate);
APPEND_STAT("ext_wbuf_size", "%u", settings.ext_wbuf_size);
APPEND_STAT("ext_compact_under", "%u", settings.ext_compact_under);
APPEND_STAT("ext_drop_under", "%u", settings.ext_drop_under);
APPEND_STAT("ext_max_frag", "%.2f", settings.ext_max_frag);
APPEND_STAT("slab_automove_freeratio", "%.3f", settings.slab_automove_freeratio);
APPEND_STAT("ext_drop_unread", "%s", settings.ext_drop_unread ? "yes" : "no");
#endif
#ifdef TLS
APPEND_STAT("ssl_enabled", "%s", settings.ssl_enabled ? "yes" : "no");
APPEND_STAT("ssl_chain_cert", "%s", settings.ssl_chain_cert);
APPEND_STAT("ssl_key", "%s", settings.ssl_key);
APPEND_STAT("ssl_verify_mode", "%d", settings.ssl_verify_mode);
APPEND_STAT("ssl_keyformat", "%d", settings.ssl_keyformat);
APPEND_STAT("ssl_ciphers", "%s", settings.ssl_ciphers ? settings.ssl_ciphers : "NULL");
APPEND_STAT("ssl_ca_cert", "%s", settings.ssl_ca_cert ? settings.ssl_ca_cert : "NULL");
APPEND_STAT("ssl_wbuf_size", "%u", settings.ssl_wbuf_size);
#endif
}
static int nz_strcmp(int nzlength, const char *nz, const char *z) {
int zlength=strlen(z);
return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
}
static bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
bool ret = true;
if (add_stats != NULL) {
if (!stat_type) {
/* prepare general statistics for the engine */
STATS_LOCK();
APPEND_STAT("bytes", "%llu", (unsigned long long)stats_state.curr_bytes);
APPEND_STAT("curr_items", "%llu", (unsigned long long)stats_state.curr_items);
APPEND_STAT("total_items", "%llu", (unsigned long long)stats.total_items);
STATS_UNLOCK();
APPEND_STAT("slab_global_page_pool", "%u", global_page_pool_size(NULL));
item_stats_totals(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "items") == 0) {
item_stats(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
slabs_stats(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
item_stats_sizes(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "sizes_enable") == 0) {
item_stats_sizes_enable(add_stats, c);
} else if (nz_strcmp(nkey, stat_type, "sizes_disable") == 0) {
item_stats_sizes_disable(add_stats, c);
} else {
ret = false;
}
} else {
ret = false;
}
return ret;
}
static inline void get_conn_text(const conn *c, const int af,
char* addr, struct sockaddr *sock_addr) {
char addr_text[MAXPATHLEN];
addr_text[0] = '\0';
const char *protoname = "?";
unsigned short port = 0;
size_t pathlen = 0;
switch (af) {
case AF_INET:
(void) inet_ntop(af,
&((struct sockaddr_in *)sock_addr)->sin_addr,
addr_text,
sizeof(addr_text) - 1);
port = ntohs(((struct sockaddr_in *)sock_addr)->sin_port);
protoname = IS_UDP(c->transport) ? "udp" : "tcp";
break;
case AF_INET6:
addr_text[0] = '[';
addr_text[1] = '\0';
if (inet_ntop(af,
&((struct sockaddr_in6 *)sock_addr)->sin6_addr,
addr_text + 1,
sizeof(addr_text) - 2)) {
strcat(addr_text, "]");
}
port = ntohs(((struct sockaddr_in6 *)sock_addr)->sin6_port);
protoname = IS_UDP(c->transport) ? "udp6" : "tcp6";
break;
case AF_UNIX:
// this strncpy call originally could piss off an address
// sanitizer; we supplied the size of the dest buf as a limiter,
// but optimized versions of strncpy could read past the end of
// *src while looking for a null terminator. Since buf and
// sun_path here are both on the stack they could even overlap,
// which is "undefined". In all OSS versions of strncpy I could
// find this has no effect; it'll still only copy until the first null
// terminator is found. Thus it's possible to get the OS to
// examine past the end of sun_path but it's unclear to me if this
// can cause any actual problem.
//
// We need a safe_strncpy util function but I'll punt on figuring
// that out for now.
pathlen = sizeof(((struct sockaddr_un *)sock_addr)->sun_path);
if (MAXPATHLEN <= pathlen) {
pathlen = MAXPATHLEN - 1;
}
strncpy(addr_text,
((struct sockaddr_un *)sock_addr)->sun_path,
pathlen);
addr_text[pathlen] = '\0';
protoname = "unix";
break;
}
if (strlen(addr_text) < 2) {
/* Most likely this is a connected UNIX-domain client which
* has no peer socket address, but there's no portable way
* to tell for sure.
*/
sprintf(addr_text, "<AF %d>", af);
}
if (port) {
sprintf(addr, "%s:%s:%u", protoname, addr_text, port);
} else {
sprintf(addr, "%s:%s", protoname, addr_text);
}
}
static void conn_to_str(const conn *c, char *addr, char *svr_addr) {
if (!c) {
strcpy(addr, "<null>");
} else if (c->state == conn_closed) {
strcpy(addr, "<closed>");
} else {
struct sockaddr_in6 local_addr;
struct sockaddr *sock_addr = (void *)&c->request_addr;
/* For listen ports and idle UDP ports, show listen address */
if (c->state == conn_listening ||
(IS_UDP(c->transport) &&
c->state == conn_read)) {
socklen_t local_addr_len = sizeof(local_addr);
if (getsockname(c->sfd,
(struct sockaddr *)&local_addr,
&local_addr_len) == 0) {
sock_addr = (struct sockaddr *)&local_addr;
}
}
get_conn_text(c, sock_addr->sa_family, addr, sock_addr);
if (c->state != conn_listening && !(IS_UDP(c->transport) &&
c->state == conn_read)) {
struct sockaddr_storage svr_sock_addr;
socklen_t svr_addr_len = sizeof(svr_sock_addr);
getsockname(c->sfd, (struct sockaddr *)&svr_sock_addr, &svr_addr_len);
get_conn_text(c, svr_sock_addr.ss_family, svr_addr, (struct sockaddr *)&svr_sock_addr);
}
}
}
static void process_stats_conns(ADD_STAT add_stats, void *c) {
int i;
char key_str[STAT_KEY_LEN];
char val_str[STAT_VAL_LEN];
size_t extras_len = sizeof("unix:") + sizeof("65535");
char addr[MAXPATHLEN + extras_len];
char svr_addr[MAXPATHLEN + extras_len];
int klen = 0, vlen = 0;
assert(add_stats);
for (i = 0; i < max_fds; i++) {
if (conns[i]) {
/* This is safe to do unlocked because conns are never freed; the
* worst that'll happen will be a minor inconsistency in the
* output -- not worth the complexity of the locking that'd be
* required to prevent it.
*/
if (IS_UDP(conns[i]->transport)) {
APPEND_NUM_STAT(i, "UDP", "%s", "UDP");
}
if (conns[i]->state != conn_closed) {
conn_to_str(conns[i], addr, svr_addr);
APPEND_NUM_STAT(i, "addr", "%s", addr);
if (conns[i]->state != conn_listening &&
!(IS_UDP(conns[i]->transport) && conns[i]->state == conn_read)) {
APPEND_NUM_STAT(i, "listen_addr", "%s", svr_addr);
}
APPEND_NUM_STAT(i, "state", "%s",
state_text(conns[i]->state));
APPEND_NUM_STAT(i, "secs_since_last_cmd", "%d",
current_time - conns[i]->last_cmd_time);
}
}
}
}
#ifdef EXTSTORE
static void process_extstore_stats(ADD_STAT add_stats, conn *c) {
int i;
char key_str[STAT_KEY_LEN];
char val_str[STAT_VAL_LEN];
int klen = 0, vlen = 0;
struct extstore_stats st;
assert(add_stats);
void *storage = c->thread->storage;
extstore_get_stats(storage, &st);
st.page_data = calloc(st.page_count, sizeof(struct extstore_page_data));
extstore_get_page_data(storage, &st);
for (i = 0; i < st.page_count; i++) {
APPEND_NUM_STAT(i, "version", "%llu",
(unsigned long long) st.page_data[i].version);
APPEND_NUM_STAT(i, "bytes", "%llu",
(unsigned long long) st.page_data[i].bytes_used);
APPEND_NUM_STAT(i, "bucket", "%u",
st.page_data[i].bucket);
APPEND_NUM_STAT(i, "free_bucket", "%u",
st.page_data[i].free_bucket);
}
}
#endif
static void process_stat(conn *c, token_t *tokens, const size_t ntokens) {
const char *subcommand = tokens[SUBCOMMAND_TOKEN].value;
assert(c != NULL);
if (ntokens < 2) {
out_string(c, "CLIENT_ERROR bad command line");
return;
}
if (ntokens == 2) {
server_stats(&append_stats, c);
(void)get_stats(NULL, 0, &append_stats, c);
} else if (strcmp(subcommand, "reset") == 0) {
stats_reset();
out_string(c, "RESET");
return;
} else if (strcmp(subcommand, "detail") == 0) {
/* NOTE: how to tackle detail with binary? */
if (ntokens < 4)
process_stats_detail(c, ""); /* outputs the error message */
else
process_stats_detail(c, tokens[2].value);
/* Output already generated */
return;
} else if (strcmp(subcommand, "settings") == 0) {
process_stat_settings(&append_stats, c);
} else if (strcmp(subcommand, "cachedump") == 0) {
char *buf;
unsigned int bytes, id, limit = 0;
if (!settings.dump_enabled) {
out_string(c, "CLIENT_ERROR stats cachedump not allowed");
return;
}
if (ntokens < 5) {
out_string(c, "CLIENT_ERROR bad command line");
return;
}
if (!safe_strtoul(tokens[2].value, &id) ||
!safe_strtoul(tokens[3].value, &limit)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
if (id >= MAX_NUMBER_OF_SLAB_CLASSES) {
out_string(c, "CLIENT_ERROR Illegal slab id");
return;
}
buf = item_cachedump(id, limit, &bytes);
write_and_free(c, buf, bytes);
return;
} else if (strcmp(subcommand, "conns") == 0) {
process_stats_conns(&append_stats, c);
#ifdef EXTSTORE
} else if (strcmp(subcommand, "extstore") == 0) {
process_extstore_stats(&append_stats, c);
#endif
} else {
/* getting here means that the subcommand is either engine specific or
is invalid. query the engine and see. */
if (get_stats(subcommand, strlen(subcommand), &append_stats, c)) {
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR out of memory writing stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
} else {
out_string(c, "ERROR");
}
return;
}
/* append terminator and start the transfer */
append_stats(NULL, 0, NULL, 0, c);
if (c->stats.buffer == NULL) {
out_of_memory(c, "SERVER_ERROR out of memory writing stats");
} else {
write_and_free(c, c->stats.buffer, c->stats.offset);
c->stats.buffer = NULL;
}
}
/* client flags == 0 means use no storage for client flags */
static inline int make_ascii_get_suffix(char *suffix, item *it, bool return_cas, int nbytes) {
char *p = suffix;
*p = ' ';
p++;
if (FLAGS_SIZE(it) == 0) {
*p = '0';
p++;
} else {
p = itoa_u32(*((uint32_t *) ITEM_suffix(it)), p);
}
*p = ' ';
p = itoa_u32(nbytes-2, p+1);
if (return_cas) {
*p = ' ';
p = itoa_u64(ITEM_get_cas(it), p+1);
}
*p = '\r';
*(p+1) = '\n';
*(p+2) = '\0';
return (p - suffix) + 2;
}
#define IT_REFCOUNT_LIMIT 60000
static inline item* limited_get(char *key, size_t nkey, conn *c, uint32_t exptime, bool should_touch, bool do_update, bool *overflow) {
item *it;
if (should_touch) {
it = item_touch(key, nkey, exptime, c);
} else {
it = item_get(key, nkey, c, do_update);
}
if (it && it->refcount > IT_REFCOUNT_LIMIT) {
item_remove(it);
it = NULL;
*overflow = true;
} else {
*overflow = false;
}
return it;
}
// Semantics are different than limited_get; since the item is returned
// locked, caller can directly change what it needs.
// though it might eventually be a better interface to sink it all into
// items.c.
static inline item* limited_get_locked(char *key, size_t nkey, conn *c, bool do_update, uint32_t *hv, bool *overflow) {
item *it;
it = item_get_locked(key, nkey, c, do_update, hv);
if (it && it->refcount > IT_REFCOUNT_LIMIT) {
do_item_remove(it);
it = NULL;
item_unlock(*hv);
*overflow = true;
} else {
*overflow = false;
}
return it;
}
#ifdef EXTSTORE
// FIXME: This runs in the IO thread. to get better IO performance this should
// simply mark the io wrapper with the return value and decrement wrapleft, if
// zero redispatching. Still a bit of work being done in the side thread but
// minimized at least.
static void _get_extstore_cb(void *e, obj_io *io, int ret) {
// FIXME: assumes success
io_wrap *wrap = (io_wrap *)io->data;
mc_resp *resp = wrap->resp;
conn *c = wrap->c;
assert(wrap->active == true);
item *read_it = (item *)io->buf;
bool miss = false;
// TODO: How to do counters for hit/misses?
if (ret < 1) {
miss = true;
} else {
uint32_t crc2;
uint32_t crc = (uint32_t) read_it->exptime;
int x;
// item is chunked, crc the iov's
if (io->iov != NULL) {
// first iov is the header, which we don't use beyond crc
crc2 = crc32c(0, (char *)io->iov[0].iov_base+STORE_OFFSET, io->iov[0].iov_len-STORE_OFFSET);
// make sure it's not sent. hack :(
io->iov[0].iov_len = 0;
for (x = 1; x < io->iovcnt; x++) {
crc2 = crc32c(crc2, (char *)io->iov[x].iov_base, io->iov[x].iov_len);
}
} else {
crc2 = crc32c(0, (char *)read_it+STORE_OFFSET, io->len-STORE_OFFSET);
}
if (crc != crc2) {
miss = true;
wrap->badcrc = true;
}
}
if (miss) {
if (wrap->noreply) {
// In all GET cases, noreply means we send nothing back.
resp->skip = true;
} else {
// TODO: This should be movable to the worker thread.
// Convert the binprot response into a miss response.
// The header requires knowing a bunch of stateful crap, so rather
// than simply writing out a "new" miss response we mangle what's
// already there.
if (c->protocol == binary_prot) {
protocol_binary_response_header *header =
(protocol_binary_response_header *)resp->wbuf;
// cut the extra nbytes off of the body_len
uint32_t body_len = ntohl(header->response.bodylen);
uint8_t hdr_len = header->response.extlen;
body_len -= resp->iov[wrap->iovec_data].iov_len + hdr_len;
resp->tosend -= resp->iov[wrap->iovec_data].iov_len + hdr_len;
header->response.extlen = 0;
header->response.status = (uint16_t)htons(PROTOCOL_BINARY_RESPONSE_KEY_ENOENT);
header->response.bodylen = htonl(body_len);
// truncate the data response.
resp->iov[wrap->iovec_data].iov_len = 0;
// wipe the extlen iov... wish it was just a flat buffer.
resp->iov[wrap->iovec_data-1].iov_len = 0;
resp->chunked_data_iov = 0;
} else {
int i;
// Meta commands have EN status lines for miss, rather than
// END as a trailer as per normal ascii.
if (resp->iov[0].iov_len >= 3
&& memcmp(resp->iov[0].iov_base, "VA ", 3) == 0) {
// TODO: These miss translators should use specific callback
// functions attached to the io wrap. This is weird :(
resp->iovcnt = 1;
resp->iov[0].iov_len = 4;
resp->iov[0].iov_base = "EN\r\n";
resp->tosend = 4;
} else {
// Wipe the iovecs up through our data injection.
// Allows trailers to be returned (END)
for (i = 0; i <= wrap->iovec_data; i++) {
resp->tosend -= resp->iov[i].iov_len;
resp->iov[i].iov_len = 0;
resp->iov[i].iov_base = NULL;
}
}
resp->chunked_total = 0;
resp->chunked_data_iov = 0;
}
}
wrap->miss = true;
} else {
assert(read_it->slabs_clsid != 0);
// TODO: should always use it instead of ITEM_data to kill more
// chunked special casing.
if ((read_it->it_flags & ITEM_CHUNKED) == 0) {
resp->iov[wrap->iovec_data].iov_base = ITEM_data(read_it);
}
wrap->miss = false;
}
c->io_wrapleft--;
wrap->active = false;
//assert(c->io_wrapleft >= 0);
// All IO's have returned, lets re-attach this connection to our original
// thread.
if (c->io_wrapleft == 0) {
assert(c->io_queued == true);
c->io_queued = false;
redispatch_conn(c);
}
}
static inline int _get_extstore(conn *c, item *it, mc_resp *resp) {
#ifdef NEED_ALIGN
item_hdr hdr;
memcpy(&hdr, ITEM_data(it), sizeof(hdr));
#else
item_hdr *hdr = (item_hdr *)ITEM_data(it);
#endif
size_t ntotal = ITEM_ntotal(it);
unsigned int clsid = slabs_clsid(ntotal);
item *new_it;
bool chunked = false;
if (ntotal > settings.slab_chunk_size_max) {
// Pull a chunked item header.
uint32_t flags;
FLAGS_CONV(it, flags);
new_it = item_alloc(ITEM_key(it), it->nkey, flags, it->exptime, it->nbytes);
assert(new_it == NULL || (new_it->it_flags & ITEM_CHUNKED));
chunked = true;
} else {
new_it = do_item_alloc_pull(ntotal, clsid);
}
if (new_it == NULL)
return -1;
assert(!c->io_queued); // FIXME: debugging.
// so we can free the chunk on a miss
new_it->slabs_clsid = clsid;
io_wrap *io = do_cache_alloc(c->thread->io_cache);
io->active = true;
io->miss = false;
io->badcrc = false;
io->noreply = c->noreply;
// io_wrap owns the reference for this object now.
io->hdr_it = it;
io->resp = resp;
io->io.iov = NULL;
// FIXME: error handling.
if (chunked) {
unsigned int ciovcnt = 0;
size_t remain = new_it->nbytes;
item_chunk *chunk = (item_chunk *) ITEM_schunk(new_it);
// TODO: This might make sense as a _global_ cache vs a per-thread.
// but we still can't load objects requiring > IOV_MAX iovs.
// In the meantime, these objects are rare/slow enough that
// malloc/freeing a statically sized object won't cause us much pain.
io->io.iov = malloc(sizeof(struct iovec) * IOV_MAX);
if (io->io.iov == NULL) {
item_remove(new_it);
do_cache_free(c->thread->io_cache, io);
return -1;
}
// fill the header so we can get the full data + crc back.
io->io.iov[0].iov_base = new_it;
io->io.iov[0].iov_len = ITEM_ntotal(new_it) - new_it->nbytes;
ciovcnt++;
while (remain > 0) {
chunk = do_item_alloc_chunk(chunk, remain);
// FIXME: _pure evil_, silently erroring if item is too large.
if (chunk == NULL || ciovcnt > IOV_MAX-1) {
item_remove(new_it);
free(io->io.iov);
// TODO: wrapper function for freeing up an io wrap?
io->io.iov = NULL;
do_cache_free(c->thread->io_cache, io);
return -1;
}
io->io.iov[ciovcnt].iov_base = chunk->data;
io->io.iov[ciovcnt].iov_len = (remain < chunk->size) ? remain : chunk->size;
chunk->used = (remain < chunk->size) ? remain : chunk->size;
remain -= chunk->size;
ciovcnt++;
}
io->io.iovcnt = ciovcnt;
}
// Chunked or non chunked we reserve a response iov here.
io->iovec_data = resp->iovcnt;
int iovtotal = (c->protocol == binary_prot) ? it->nbytes - 2 : it->nbytes;
if (chunked) {
resp_add_chunked_iov(resp, new_it, iovtotal);
} else {
resp_add_iov(resp, "", iovtotal);
}
io->io.buf = (void *)new_it;
io->c = c;
// We need to stack the sub-struct IO's together as well.
if (c->io_wraplist) {
io->io.next = &c->io_wraplist->io;
} else {
io->io.next = NULL;
}
// IO queue for this connection.
io->next = c->io_wraplist;
c->io_wraplist = io;
assert(c->io_wrapleft >= 0);
c->io_wrapleft++;
// reference ourselves for the callback.
io->io.data = (void *)io;
// Now, fill in io->io based on what was in our header.
#ifdef NEED_ALIGN
io->io.page_version = hdr.page_version;
io->io.page_id = hdr.page_id;
io->io.offset = hdr.offset;
#else
io->io.page_version = hdr->page_version;
io->io.page_id = hdr->page_id;
io->io.offset = hdr->offset;
#endif
io->io.len = ntotal;
io->io.mode = OBJ_IO_READ;
io->io.cb = _get_extstore_cb;
//fprintf(stderr, "EXTSTORE: IO stacked %u\n", io->iovec_data);
// FIXME: This stat needs to move to reflect # of flash hits vs misses
// for now it's a good gauge on how often we request out to flash at
// least.
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
return 0;
}
#endif
/* ntokens is overwritten here... shrug.. */
static inline void process_get_command(conn *c, token_t *tokens, size_t ntokens, bool return_cas, bool should_touch) {
char *key;
size_t nkey;
item *it;
token_t *key_token = &tokens[KEY_TOKEN];
int32_t exptime_int = 0;
rel_time_t exptime = 0;
bool fail_length = false;
assert(c != NULL);
mc_resp *resp = c->resp;
if (should_touch) {
// For get and touch commands, use first token as exptime
if (!safe_strtol(tokens[1].value, &exptime_int)) {
out_string(c, "CLIENT_ERROR invalid exptime argument");
return;
}
key_token++;
exptime = realtime(EXPTIME_TO_POSITIVE_TIME(exptime_int));
}
do {
while(key_token->length != 0) {
bool overflow; // not used here.
key = key_token->value;
nkey = key_token->length;
if (nkey > KEY_MAX_LENGTH) {
fail_length = true;
goto stop;
}
it = limited_get(key, nkey, c, exptime, should_touch, DO_UPDATE, &overflow);
if (settings.detail_enabled) {
stats_prefix_record_get(key, nkey, NULL != it);
}
if (it) {
/*
* Construct the response. Each hit adds three elements to the
* outgoing data list:
* "VALUE "
* key
* " " + flags + " " + data length + "\r\n" + data (with \r\n)
*/
{
MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
int nbytes = it->nbytes;;
nbytes = it->nbytes;
char *p = resp->wbuf;
memcpy(p, "VALUE ", 6);
p += 6;
memcpy(p, ITEM_key(it), it->nkey);
p += it->nkey;
p += make_ascii_get_suffix(p, it, return_cas, nbytes);
resp_add_iov(resp, resp->wbuf, p - resp->wbuf);
#ifdef EXTSTORE
if (it->it_flags & ITEM_HDR) {
if (_get_extstore(c, it, resp) != 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_oom_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
item_remove(it);
goto stop;
}
} else if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#else
if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#endif
}
if (settings.verbose > 1) {
int ii;
fprintf(stderr, ">%d sending key ", c->sfd);
for (ii = 0; ii < it->nkey; ++ii) {
fprintf(stderr, "%c", key[ii]);
}
fprintf(stderr, "\n");
}
/* item_get() has incremented it->refcount for us */
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
} else {
c->thread->stats.lru_hits[it->slabs_clsid]++;
c->thread->stats.get_cmds++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
#ifdef EXTSTORE
/* If ITEM_HDR, an io_wrap owns the reference. */
if ((it->it_flags & ITEM_HDR) == 0) {
resp->item = it;
}
#else
resp->item = it;
#endif
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (should_touch) {
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
} else {
c->thread->stats.get_misses++;
c->thread->stats.get_cmds++;
}
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
pthread_mutex_unlock(&c->thread->stats.mutex);
}
key_token++;
if (key_token->length != 0) {
if (!resp_start(c)) {
goto stop;
}
resp = c->resp;
}
}
/*
* If the command string hasn't been fully processed, get the next set
* of tokens.
*/
if (key_token->value != NULL) {
ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS);
key_token = tokens;
if (!resp_start(c)) {
goto stop;
}
resp = c->resp;
}
} while(key_token->value != NULL);
stop:
if (settings.verbose > 1)
fprintf(stderr, ">%d END\n", c->sfd);
/*
If the loop was terminated because of out-of-memory, it is not
reliable to add END\r\n to the buffer, because it might not end
in \r\n. So we send SERVER_ERROR instead.
*/
if (key_token->value != NULL) {
// Kill any stacked responses we had.
conn_release_items(c);
// Start a new response object for the error message.
if (!resp_start(c)) {
// severe out of memory error.
conn_set_state(c, conn_closing);
return;
}
if (fail_length) {
out_string(c, "CLIENT_ERROR bad command line format");
} else {
out_of_memory(c, "SERVER_ERROR out of memory writing get response");
}
} else {
// Tag the end token onto the most recent response object.
resp_add_iov(resp, "END\r\n", 5);
conn_set_state(c, conn_mwrite);
}
}
// slow snprintf for debugging purposes.
static void process_meta_command(conn *c, token_t *tokens, const size_t ntokens) {
assert(c != NULL);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
char *key = tokens[KEY_TOKEN].value;
size_t nkey = tokens[KEY_TOKEN].length;
bool overflow; // not used here.
item *it = limited_get(key, nkey, c, 0, false, DONT_UPDATE, &overflow);
if (it) {
mc_resp *resp = c->resp;
size_t total = 0;
size_t ret;
// similar to out_string().
memcpy(resp->wbuf, "ME ", 3);
total += 3;
memcpy(resp->wbuf + total, ITEM_key(it), it->nkey);
total += it->nkey;
resp->wbuf[total] = ' ';
total++;
ret = snprintf(resp->wbuf + total, WRITE_BUFFER_SIZE - (it->nkey + 12),
"exp=%d la=%llu cas=%llu fetch=%s cls=%u size=%lu\r\n",
(it->exptime == 0) ? -1 : (current_time - it->exptime),
(unsigned long long)(current_time - it->time),
(unsigned long long)ITEM_get_cas(it),
(it->it_flags & ITEM_FETCHED) ? "yes" : "no",
ITEM_clsid(it),
(unsigned long) ITEM_ntotal(it));
item_remove(it);
resp->wbytes = total + ret;
resp_add_iov(resp, resp->wbuf, resp->wbytes);
conn_set_state(c, conn_new_cmd);
} else {
out_string(c, "EN");
}
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.meta_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
#define MFLAG_MAX_OPT_LENGTH 20
#define MFLAG_MAX_OPAQUE_LENGTH 32
struct _meta_flags {
unsigned int has_error :1; // flipped if we found an error during parsing.
unsigned int no_update :1;
unsigned int locked :1;
unsigned int vivify :1;
unsigned int la :1;
unsigned int hit :1;
unsigned int value :1;
unsigned int set_stale :1;
unsigned int no_reply :1;
unsigned int has_cas :1;
unsigned int new_ttl :1;
char mode; // single character mode switch, common to ms/ma
rel_time_t exptime;
rel_time_t autoviv_exptime;
rel_time_t recache_time;
int32_t value_len;
uint32_t client_flags;
uint64_t req_cas_id;
uint64_t delta; // ma
uint64_t initial; // ma
};
static int _meta_flag_preparse(token_t *tokens, const size_t ntokens,
struct _meta_flags *of, char **errstr) {
unsigned int i;
int32_t tmp_int;
uint8_t seen[127] = {0};
// Start just past the key token. Look at first character of each token.
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
uint8_t o = (uint8_t)tokens[i].value[0];
// zero out repeat flags so we don't over-parse for return data.
if (o >= 127 || seen[o] != 0) {
*errstr = "CLIENT_ERROR duplicate flag";
return -1;
}
seen[o] = 1;
switch (o) {
/* Negative exptimes can underflow and end up immortal. realtime() will
immediately expire values that are greater than REALTIME_MAXDELTA, but less
than process_started, so lets aim for that. */
case 'N':
of->locked = 1;
of->vivify = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->autoviv_exptime = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
}
break;
case 'T':
of->locked = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->exptime = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
of->new_ttl = true;
}
break;
case 'R':
of->locked = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->recache_time = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
}
break;
case 'l':
of->la = 1;
of->locked = 1; // need locked to delay LRU bump
break;
case 'O':
break;
case 'k': // known but no special handling
case 's':
case 't':
case 'c':
case 'f':
break;
case 'v':
of->value = 1;
break;
case 'h':
of->locked = 1; // need locked to delay LRU bump
break;
case 'u':
of->no_update = 1;
break;
case 'q':
of->no_reply = 1;
break;
// mset-related.
case 'F':
if (!safe_strtoul(tokens[i].value+1, &of->client_flags)) {
of->has_error = true;
}
break;
case 'S':
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
of->has_error = true;
} else {
// Size is adjusted for underflow or overflow once the
// \r\n terminator is added.
if (tmp_int < 0 || tmp_int > (INT_MAX - 2)) {
*errstr = "CLIENT_ERROR invalid length";
of->has_error = true;
} else {
of->value_len = tmp_int + 2; // \r\n
}
}
break;
case 'C': // mset, mdelete, marithmetic
if (!safe_strtoull(tokens[i].value+1, &of->req_cas_id)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = true;
} else {
of->has_cas = true;
}
break;
case 'M': // mset and marithmetic mode switch
if (tokens[i].length != 2) {
*errstr = "CLIENT_ERROR incorrect length for M token";
of->has_error = 1;
} else {
of->mode = tokens[i].value[1];
}
break;
case 'J': // marithmetic initial value
if (!safe_strtoull(tokens[i].value+1, &of->initial)) {
*errstr = "CLIENT_ERROR invalid numeric initial value";
of->has_error = 1;
}
break;
case 'D': // marithmetic delta value
if (!safe_strtoull(tokens[i].value+1, &of->delta)) {
*errstr = "CLIENT_ERROR invalid numeric delta value";
of->has_error = 1;
}
break;
case 'I':
of->set_stale = 1;
break;
default: // unknown flag, bail.
*errstr = "CLIENT_ERROR invalid flag";
return -1;
}
}
return of->has_error ? -1 : 0;
}
#define META_SPACE(p) { \
*p = ' '; \
p++; \
}
#define META_CHAR(p, c) { \
*p = ' '; \
*(p+1) = c; \
p += 2; \
}
static void process_mget_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
item *it;
unsigned int i = 0;
struct _meta_flags of = {0}; // option bitflags.
uint32_t hv; // cached hash value for unlocking an item.
bool failed = false;
bool item_created = false;
bool won_token = false;
bool ttl_set = false;
char *errstr;
mc_resp *resp = c->resp;
char *p = resp->wbuf;
assert(c != NULL);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
// NOTE: final token has length == 0.
// KEY_TOKEN == 1. 0 is command.
if (ntokens == 3) {
// TODO: any way to fix this?
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
} else if (ntokens > MFLAG_MAX_OPT_LENGTH) {
// TODO: ensure the command tokenizer gives us at least this many
out_errstring(c, "CLIENT_ERROR options flags are too long");
return;
}
// scrubs duplicated options and sets flags for how to load the item.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
out_errstring(c, errstr);
return;
}
c->noreply = of.no_reply;
// TODO: need to indicate if the item was overflowed or not?
// I think we do, since an overflow shouldn't trigger an alloc/replace.
bool overflow = false;
if (!of.locked) {
it = limited_get(key, nkey, c, 0, false, !of.no_update, &overflow);
} else {
// If we had to lock the item, we're doing our own bump later.
it = limited_get_locked(key, nkey, c, DONT_UPDATE, &hv, &overflow);
}
// Since we're a new protocol, we can actually inform users that refcount
// overflow is happening by straight up throwing an error.
// We definitely don't want to re-autovivify by accident.
if (overflow) {
assert(it == NULL);
out_errstring(c, "SERVER_ERROR refcount overflow during fetch");
return;
}
if (it == NULL && of.vivify) {
// Fill in the exptime during parsing later.
it = item_alloc(key, nkey, 0, realtime(0), 2);
// We don't actually need any of do_store_item's logic:
// - already fetched and missed an existing item.
// - lock is still held.
// - not append/prepend/replace
// - not testing CAS
if (it != NULL) {
// I look forward to the day I get rid of this :)
memcpy(ITEM_data(it), "\r\n", 2);
// NOTE: This initializes the CAS value.
do_item_link(it, hv);
item_created = true;
}
}
// don't have to check result of add_iov() since the iov size defaults are
// enough.
if (it) {
if (of.value) {
memcpy(p, "VA ", 3);
p = itoa_u32(it->nbytes-2, p+3);
} else {
memcpy(p, "OK", 2);
p += 2;
}
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
case 'T':
ttl_set = true;
it->exptime = of.exptime;
break;
case 'N':
if (item_created) {
it->exptime = of.autoviv_exptime;
won_token = true;
}
break;
case 'R':
// If we haven't autovivified and supplied token is less
// than current TTL, mark a win.
if ((it->it_flags & ITEM_TOKEN_SENT) == 0
&& !item_created
&& it->exptime != 0
&& it->exptime < of.recache_time) {
won_token = true;
}
break;
case 's':
META_CHAR(p, 's');
p = itoa_u32(it->nbytes-2, p);
break;
case 't':
// TTL remaining as of this request.
// needs to be relative because server clocks may not be in sync.
META_CHAR(p, 't');
if (it->exptime == 0) {
*p = '-';
*(p+1) = '1';
p += 2;
} else {
p = itoa_u32(it->exptime - current_time, p);
}
break;
case 'c':
META_CHAR(p, 'c');
p = itoa_u64(ITEM_get_cas(it), p);
break;
case 'f':
META_CHAR(p, 'f');
if (FLAGS_SIZE(it) == 0) {
*p = '0';
p++;
} else {
p = itoa_u32(*((uint32_t *) ITEM_suffix(it)), p);
}
break;
case 'l':
META_CHAR(p, 'l');
p = itoa_u32(current_time - it->time, p);
break;
case 'h':
META_CHAR(p, 'h');
if (it->it_flags & ITEM_FETCHED) {
*p = '1';
} else {
*p = '0';
}
p++;
break;
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, ITEM_key(it), it->nkey);
p += it->nkey;
break;
}
}
// Has this item already sent a token?
// Important to do this here so we don't send W with Z.
// Isn't critical, but easier for client authors to understand.
if (it->it_flags & ITEM_TOKEN_SENT) {
META_CHAR(p, 'Z');
}
if (it->it_flags & ITEM_STALE) {
META_CHAR(p, 'X');
// FIXME: think hard about this. is this a default, or a flag?
if ((it->it_flags & ITEM_TOKEN_SENT) == 0) {
// If we're stale but no token already sent, now send one.
won_token = true;
}
}
if (won_token) {
// Mark a win into the flag buffer.
META_CHAR(p, 'W');
it->it_flags |= ITEM_TOKEN_SENT;
}
*p = '\r';
*(p+1) = '\n';
*(p+2) = '\0';
p += 2;
// finally, chain in the buffer.
resp_add_iov(resp, resp->wbuf, p - resp->wbuf);
if (of.value) {
#ifdef EXTSTORE
if (it->it_flags & ITEM_HDR) {
if (_get_extstore(c, it, resp) != 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_oom_extstore++;
pthread_mutex_unlock(&c->thread->stats.mutex);
failed = true;
}
} else if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#else
if ((it->it_flags & ITEM_CHUNKED) == 0) {
resp_add_iov(resp, ITEM_data(it), it->nbytes);
} else {
resp_add_chunked_iov(resp, it, it->nbytes);
}
#endif
}
// need to hold the ref at least because of the key above.
#ifdef EXTSTORE
if (!failed) {
if ((it->it_flags & ITEM_HDR) != 0 && of.value) {
// Only have extstore clean if header and returning value.
resp->item = NULL;
} else {
resp->item = it;
}
} else {
// Failed to set up extstore fetch.
if (of.locked) {
do_item_remove(it);
} else {
item_remove(it);
}
}
#else
resp->item = it;
#endif
} else {
failed = true;
}
if (of.locked) {
// Delayed bump so we could get fetched/last access time pre-update.
if (!of.no_update && it != NULL) {
do_item_bump(c, it, hv);
}
item_unlock(hv);
}
// we count this command as a normal one if we've gotten this far.
// TODO: for autovivify case, miss never happens. Is this okay?
if (!failed) {
pthread_mutex_lock(&c->thread->stats.mutex);
if (ttl_set) {
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
} else {
c->thread->stats.lru_hits[it->slabs_clsid]++;
c->thread->stats.get_cmds++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
conn_set_state(c, conn_new_cmd);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (ttl_set) {
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
} else {
c->thread->stats.get_misses++;
c->thread->stats.get_cmds++;
}
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
pthread_mutex_unlock(&c->thread->stats.mutex);
// This gets elided in noreply mode.
out_string(c, "EN");
}
return;
error:
if (it) {
do_item_remove(it);
if (of.locked) {
item_unlock(hv);
}
}
out_errstring(c, errstr);
}
static void process_mset_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
item *it;
int i;
short comm = NREAD_SET;
struct _meta_flags of = {0}; // option bitflags.
char *errstr = "CLIENT_ERROR bad command line format";
uint32_t hv;
mc_resp *resp = c->resp;
char *p = resp->wbuf;
assert(c != NULL);
// TODO: most of this is identical to mget.
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (ntokens == 3) {
out_errstring(c, "CLIENT_ERROR bad command line format");
return;
}
if (ntokens > MFLAG_MAX_OPT_LENGTH) {
out_errstring(c, "CLIENT_ERROR options flags too long");
return;
}
// leave space for the status code.
p = resp->wbuf + 3;
// We need to at least try to get the size to properly slurp bad bytes
// after an error.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
goto error;
}
// Set noreply after tokens are understood.
c->noreply = of.no_reply;
bool has_error = false;
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
has_error = true;
break;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
// "mode switch" to alternative commands
switch (of.mode) {
case 0:
break; // no mode supplied.
case 'E': // Add...
comm = NREAD_ADD;
break;
case 'A': // Append.
comm = NREAD_APPEND;
break;
case 'P': // Prepend.
comm = NREAD_PREPEND;
break;
case 'R': // Replace.
comm = NREAD_REPLACE;
break;
case 'S': // Set. Default.
comm = NREAD_SET;
break;
default:
errstr = "CLIENT_ERROR invalid mode for ms M token";
goto error;
}
// The item storage function doesn't exactly map to mset.
// If a CAS value is supplied, upgrade default SET mode to CAS mode.
// Also allows REPLACE to work, as REPLACE + CAS works the same as CAS.
// add-with-cas works the same as add; but could only LRU bump if match..
// APPEND/PREPEND allow a simplified CAS check.
if (of.has_cas && (comm == NREAD_SET || comm == NREAD_REPLACE)) {
comm = NREAD_CAS;
}
// We attempt to process as much as we can in hopes of getting a valid and
// adjusted vlen, or else the data swallowed after error will be for 0b.
if (has_error)
goto error;
it = item_alloc(key, nkey, of.client_flags, of.exptime, of.value_len);
if (it == 0) {
enum store_item_type status;
// TODO: These could be normalized codes (TL and OM). Need to
// reorganize the output stuff a bit though.
if (! item_size_ok(nkey, of.client_flags, of.value_len)) {
errstr = "SERVER_ERROR object too large for cache";
status = TOO_LARGE;
} else {
errstr = "SERVER_ERROR out of memory storing object";
status = NO_MEMORY;
}
// FIXME: LOGGER_LOG specific to mset, include options.
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
NULL, status, comm, key, nkey, 0, 0);
/* Avoid stale data persisting in cache because we failed alloc. */
// NOTE: only if SET mode?
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
do_item_remove(it);
}
item_unlock(hv);
goto error;
}
ITEM_set_cas(it, of.req_cas_id);
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = it->nbytes;
c->cmd = comm;
if (of.set_stale && comm == NREAD_CAS) {
c->set_stale = true;
}
resp->wbytes = p - resp->wbuf;
memcpy(resp->wbuf + resp->wbytes, "\r\n", 2);
resp->wbytes += 2;
// We've written the status line into wbuf, use wbytes to finalize later.
resp_add_iov(resp, resp->wbuf, resp->wbytes);
c->mset_res = true;
conn_set_state(c, conn_nread);
return;
error:
/* swallow the data line */
c->sbytes = of.value_len;
// Note: no errors possible after the item was successfully allocated.
// So we're just looking at dumping error codes and returning.
out_errstring(c, errstr);
// TODO: pass state in? else switching twice meh.
conn_set_state(c, conn_swallow);
}
static void process_mdelete_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
uint64_t req_cas_id = 0;
item *it = NULL;
int i;
uint32_t hv;
struct _meta_flags of = {0}; // option bitflags.
char *errstr = "CLIENT_ERROR bad command line format";
mc_resp *resp = c->resp;
// reserve 3 bytes for status code
char *p = resp->wbuf + 3;
assert(c != NULL);
// TODO: most of this is identical to mget.
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (ntokens > MFLAG_MAX_OPT_LENGTH) {
out_string(c, "CLIENT_ERROR options flags too long");
return;
}
// scrubs duplicated options and sets flags for how to load the item.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
out_errstring(c, "CLIENT_ERROR invalid or duplicate flag");
return;
}
c->noreply = of.no_reply;
assert(c != NULL);
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
// allow only deleting/marking if a CAS value matches.
if (of.has_cas && ITEM_get_cas(it) != req_cas_id) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
memcpy(resp->wbuf, "EX ", 3);
goto cleanup;
}
// If we're to set this item as stale, we don't actually want to
// delete it. We mark the stale bit, bump CAS, and update exptime if
// we were supplied a new TTL.
if (of.set_stale) {
if (of.new_ttl) {
it->exptime = of.exptime;
}
it->it_flags |= ITEM_STALE;
// Also need to remove TOKEN_SENT, so next client can win.
it->it_flags &= ~ITEM_TOKEN_SENT;
ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
// Clients can noreply nominal responses.
if (c->noreply)
resp->skip = true;
memcpy(resp->wbuf, "OK ", 3);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].delete_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
if (c->noreply)
resp->skip = true;
memcpy(resp->wbuf, "OK ", 3);
}
goto cleanup;
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
memcpy(resp->wbuf, "NF ", 3);
goto cleanup;
}
cleanup:
if (it) {
do_item_remove(it);
}
// Item is always returned locked, even if missing.
item_unlock(hv);
resp->wbytes = p - resp->wbuf;
memcpy(resp->wbuf + resp->wbytes, "\r\n", 2);
resp->wbytes += 2;
resp_add_iov(resp, resp->wbuf, resp->wbytes);
conn_set_state(c, conn_new_cmd);
return;
error:
out_errstring(c, errstr);
}
static void process_marithmetic_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
int i;
struct _meta_flags of = {0}; // option bitflags.
char *errstr = "CLIENT_ERROR bad command line format";
mc_resp *resp = c->resp;
// no reservation (like del/set) since we post-process the status line.
char *p = resp->wbuf;
// If no argument supplied, incr or decr by one.
of.delta = 1;
of.initial = 0; // redundant, for clarity.
bool incr = true; // default mode is to increment.
bool locked = false;
uint32_t hv = 0;
item *it = NULL; // item returned by do_add_delta.
assert(c != NULL);
// TODO: most of this is identical to mget.
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (ntokens > MFLAG_MAX_OPT_LENGTH) {
out_string(c, "CLIENT_ERROR options flags too long");
return;
}
// scrubs duplicated options and sets flags for how to load the item.
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
out_errstring(c, "CLIENT_ERROR invalid or duplicate flag");
return;
}
c->noreply = of.no_reply;
assert(c != NULL);
// "mode switch" to alternative commands
switch (of.mode) {
case 0: // no switch supplied.
break;
case 'I': // Incr (default)
case '+':
incr = true;
break;
case 'D': // Decr.
case '-':
incr = false;
break;
default:
errstr = "CLIENT_ERROR invalid mode for ma M token";
goto error;
break;
}
// take hash value and manually lock item... hold lock during store phase
// on miss and avoid recalculating the hash multiple times.
hv = hash(key, nkey);
item_lock(hv);
locked = true;
char tmpbuf[INCR_MAX_STORAGE_LEN];
// return a referenced item if it exists, so we can modify it here, rather
// than adding even more parameters to do_add_delta.
bool item_created = false;
switch(do_add_delta(c, key, nkey, incr, of.delta, tmpbuf, &of.req_cas_id, hv, &it)) {
case OK:
if (c->noreply)
resp->skip = true;
memcpy(resp->wbuf, "OK ", 3);
break;
case NON_NUMERIC:
errstr = "CLIENT_ERROR cannot increment or decrement non-numeric value";
goto error;
break;
case EOM:
errstr = "SERVER_ERROR out of memory";
goto error;
break;
case DELTA_ITEM_NOT_FOUND:
if (of.vivify) {
itoa_u64(of.initial, tmpbuf);
int vlen = strlen(tmpbuf);
it = item_alloc(key, nkey, 0, 0, vlen+2);
if (it != NULL) {
memcpy(ITEM_data(it), tmpbuf, vlen);
memcpy(ITEM_data(it) + vlen, "\r\n", 2);
if (do_store_item(it, NREAD_ADD, c, hv)) {
item_created = true;
} else {
// Not sure how we can get here if we're holding the lock.
memcpy(resp->wbuf, "NS ", 3);
}
} else {
errstr = "SERVER_ERROR Out of memory allocating new item";
goto error;
}
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (incr) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
// won't have a valid it here.
memcpy(p, "NF ", 3);
p += 3;
}
break;
case DELTA_ITEM_CAS_MISMATCH:
// also returns without a valid it.
memcpy(p, "EX ", 3);
p += 3;
break;
}
// final loop
// allows building the response with information after vivifying from a
// miss, or returning a new CAS value after add_delta().
if (it) {
size_t vlen = strlen(tmpbuf);
if (of.value) {
memcpy(p, "VA ", 3);
p = itoa_u32(vlen, p+3);
} else {
memcpy(p, "OK", 2);
p += 2;
}
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
case 'c':
META_CHAR(p, 'c');
p = itoa_u64(ITEM_get_cas(it), p);
break;
case 't':
META_CHAR(p, 't');
if (it->exptime == 0) {
*p = '-';
*(p+1) = '1';
p += 2;
} else {
p = itoa_u32(it->exptime - current_time, p);
}
break;
case 'T':
it->exptime = of.exptime;
break;
case 'N':
if (item_created) {
it->exptime = of.autoviv_exptime;
}
break;
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
if (of.value) {
*p = '\r';
*(p+1) = '\n';
p += 2;
memcpy(p, tmpbuf, vlen);
p += vlen;
}
do_item_remove(it);
} else {
// No item to handle. still need to return opaque/key tokens
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
switch (tokens[i].value[0]) {
// TODO: macro perhaps?
case 'O':
if (tokens[i].length > MFLAG_MAX_OPAQUE_LENGTH) {
errstr = "CLIENT_ERROR opaque token too long";
goto error;
}
META_SPACE(p);
memcpy(p, tokens[i].value, tokens[i].length);
p += tokens[i].length;
break;
case 'k':
META_CHAR(p, 'k');
memcpy(p, key, nkey);
p += nkey;
break;
}
}
}
item_unlock(hv);
resp->wbytes = p - resp->wbuf;
memcpy(resp->wbuf + resp->wbytes, "\r\n", 2);
resp->wbytes += 2;
resp_add_iov(resp, resp->wbuf, resp->wbytes);
conn_set_state(c, conn_new_cmd);
return;
error:
if (it != NULL)
do_item_remove(it);
if (locked)
item_unlock(hv);
out_errstring(c, errstr);
}
static void process_update_command(conn *c, token_t *tokens, const size_t ntokens, int comm, bool handle_cas) {
char *key;
size_t nkey;
unsigned int flags;
int32_t exptime_int = 0;
rel_time_t exptime = 0;
int vlen;
uint64_t req_cas_id=0;
item *it;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (! (safe_strtoul(tokens[2].value, (uint32_t *)&flags)
&& safe_strtol(tokens[3].value, &exptime_int)
&& safe_strtol(tokens[4].value, (int32_t *)&vlen))) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
exptime = realtime(EXPTIME_TO_POSITIVE_TIME(exptime_int));
// does cas value exist?
if (handle_cas) {
if (!safe_strtoull(tokens[5].value, &req_cas_id)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
}
if (vlen < 0 || vlen > (INT_MAX - 2)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
vlen += 2;
if (settings.detail_enabled) {
stats_prefix_record_set(key, nkey);
}
it = item_alloc(key, nkey, flags, exptime, vlen);
if (it == 0) {
enum store_item_type status;
if (! item_size_ok(nkey, flags, vlen)) {
out_string(c, "SERVER_ERROR object too large for cache");
status = TOO_LARGE;
} else {
out_of_memory(c, "SERVER_ERROR out of memory storing object");
status = NO_MEMORY;
}
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
NULL, status, comm, key, nkey, 0, 0, c->sfd);
/* swallow the data line */
conn_set_state(c, conn_swallow);
c->sbytes = vlen;
/* Avoid stale data persisting in cache because we failed alloc.
* Unacceptable for SET. Anywhere else too? */
if (comm == NREAD_SET) {
it = item_get(key, nkey, c, DONT_UPDATE);
if (it) {
item_unlink(it);
STORAGE_delete(c->thread->storage, it);
item_remove(it);
}
}
return;
}
ITEM_set_cas(it, req_cas_id);
c->item = it;
#ifdef NEED_ALIGN
if (it->it_flags & ITEM_CHUNKED) {
c->ritem = ITEM_schunk(it);
} else {
c->ritem = ITEM_data(it);
}
#else
c->ritem = ITEM_data(it);
#endif
c->rlbytes = it->nbytes;
c->cmd = comm;
conn_set_state(c, conn_nread);
}
static void process_touch_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
int32_t exptime_int = 0;
rel_time_t exptime = 0;
item *it;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (!safe_strtol(tokens[2].value, &exptime_int)) {
out_string(c, "CLIENT_ERROR invalid exptime argument");
return;
}
exptime = realtime(EXPTIME_TO_POSITIVE_TIME(exptime_int));
it = item_touch(key, nkey, exptime, c);
if (it) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.touch_cmds++;
c->thread->stats.slab_stats[ITEM_clsid(it)].touch_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "TOUCHED");
item_remove(it);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.touch_cmds++;
c->thread->stats.touch_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "NOT_FOUND");
}
}
static void process_arithmetic_command(conn *c, token_t *tokens, const size_t ntokens, const bool incr) {
char temp[INCR_MAX_STORAGE_LEN];
uint64_t delta;
char *key;
size_t nkey;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if (!safe_strtoull(tokens[2].value, &delta)) {
out_string(c, "CLIENT_ERROR invalid numeric delta argument");
return;
}
switch(add_delta(c, key, nkey, incr, delta, temp, NULL)) {
case OK:
out_string(c, temp);
break;
case NON_NUMERIC:
out_string(c, "CLIENT_ERROR cannot increment or decrement non-numeric value");
break;
case EOM:
out_of_memory(c, "SERVER_ERROR out of memory");
break;
case DELTA_ITEM_NOT_FOUND:
pthread_mutex_lock(&c->thread->stats.mutex);
if (incr) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "NOT_FOUND");
break;
case DELTA_ITEM_CAS_MISMATCH:
break; /* Should never get here */
}
}
/*
* adds a delta value to a numeric item.
*
* c connection requesting the operation
* it item to adjust
* incr true to increment value, false to decrement
* delta amount to adjust value by
* buf buffer for response string
*
* returns a response string to send back to the client.
*/
enum delta_result_type do_add_delta(conn *c, const char *key, const size_t nkey,
const bool incr, const int64_t delta,
char *buf, uint64_t *cas,
const uint32_t hv,
item **it_ret) {
char *ptr;
uint64_t value;
int res;
item *it;
it = do_item_get(key, nkey, hv, c, DONT_UPDATE);
if (!it) {
return DELTA_ITEM_NOT_FOUND;
}
/* Can't delta zero byte values. 2-byte are the "\r\n" */
/* Also can't delta for chunked items. Too large to be a number */
#ifdef EXTSTORE
if (it->nbytes <= 2 || (it->it_flags & (ITEM_CHUNKED|ITEM_HDR)) != 0) {
#else
if (it->nbytes <= 2 || (it->it_flags & (ITEM_CHUNKED)) != 0) {
#endif
do_item_remove(it);
return NON_NUMERIC;
}
if (cas != NULL && *cas != 0 && ITEM_get_cas(it) != *cas) {
do_item_remove(it);
return DELTA_ITEM_CAS_MISMATCH;
}
ptr = ITEM_data(it);
if (!safe_strtoull(ptr, &value)) {
do_item_remove(it);
return NON_NUMERIC;
}
if (incr) {
value += delta;
MEMCACHED_COMMAND_INCR(c->sfd, ITEM_key(it), it->nkey, value);
} else {
if(delta > value) {
value = 0;
} else {
value -= delta;
}
MEMCACHED_COMMAND_DECR(c->sfd, ITEM_key(it), it->nkey, value);
}
pthread_mutex_lock(&c->thread->stats.mutex);
if (incr) {
c->thread->stats.slab_stats[ITEM_clsid(it)].incr_hits++;
} else {
c->thread->stats.slab_stats[ITEM_clsid(it)].decr_hits++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
itoa_u64(value, buf);
res = strlen(buf);
/* refcount == 2 means we are the only ones holding the item, and it is
* linked. We hold the item's lock in this function, so refcount cannot
* increase. */
if (res + 2 <= it->nbytes && it->refcount == 2) { /* replace in-place */
/* When changing the value without replacing the item, we
need to update the CAS on the existing item. */
/* We also need to fiddle it in the sizes tracker in case the tracking
* was enabled at runtime, since it relies on the CAS value to know
* whether to remove an item or not. */
item_stats_sizes_remove(it);
ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
item_stats_sizes_add(it);
memcpy(ITEM_data(it), buf, res);
memset(ITEM_data(it) + res, ' ', it->nbytes - res - 2);
do_item_update(it);
} else if (it->refcount > 1) {
item *new_it;
uint32_t flags;
FLAGS_CONV(it, flags);
new_it = do_item_alloc(ITEM_key(it), it->nkey, flags, it->exptime, res + 2);
if (new_it == 0) {
do_item_remove(it);
return EOM;
}
memcpy(ITEM_data(new_it), buf, res);
memcpy(ITEM_data(new_it) + res, "\r\n", 2);
item_replace(it, new_it, hv);
// Overwrite the older item's CAS with our new CAS since we're
// returning the CAS of the old item below.
ITEM_set_cas(it, (settings.use_cas) ? ITEM_get_cas(new_it) : 0);
do_item_remove(new_it); /* release our reference */
} else {
/* Should never get here. This means we somehow fetched an unlinked
* item. TODO: Add a counter? */
if (settings.verbose) {
fprintf(stderr, "Tried to do incr/decr on invalid item\n");
}
if (it->refcount == 1)
do_item_remove(it);
return DELTA_ITEM_NOT_FOUND;
}
if (cas) {
*cas = ITEM_get_cas(it); /* swap the incoming CAS value */
}
if (it_ret != NULL) {
*it_ret = it;
} else {
do_item_remove(it); /* release our reference */
}
return OK;
}
static void process_delete_command(conn *c, token_t *tokens, const size_t ntokens) {
char *key;
size_t nkey;
item *it;
uint32_t hv;
assert(c != NULL);
if (ntokens > 3) {
bool hold_is_zero = strcmp(tokens[KEY_TOKEN+1].value, "0") == 0;
bool sets_noreply = set_noreply_maybe(c, tokens, ntokens);
bool valid = (ntokens == 4 && (hold_is_zero || sets_noreply))
|| (ntokens == 5 && hold_is_zero && sets_noreply);
if (!valid) {
out_string(c, "CLIENT_ERROR bad command line format. "
"Usage: delete <key> [noreply]");
return;
}
}
key = tokens[KEY_TOKEN].value;
nkey = tokens[KEY_TOKEN].length;
if(nkey > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
if (settings.detail_enabled) {
stats_prefix_record_delete(key, nkey);
}
it = item_get_locked(key, nkey, c, DONT_UPDATE, &hv);
if (it) {
MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].delete_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
do_item_unlink(it, hv);
STORAGE_delete(c->thread->storage, it);
do_item_remove(it); /* release our reference */
out_string(c, "DELETED");
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.delete_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
out_string(c, "NOT_FOUND");
}
item_unlock(hv);
}
static void process_verbosity_command(conn *c, token_t *tokens, const size_t ntokens) {
unsigned int level;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
level = strtoul(tokens[1].value, NULL, 10);
settings.verbose = level > MAX_VERBOSITY_LEVEL ? MAX_VERBOSITY_LEVEL : level;
out_string(c, "OK");
return;
}
#ifdef MEMCACHED_DEBUG
static void process_misbehave_command(conn *c) {
int allowed = 0;
// try opening new TCP socket
int i = socket(AF_INET, SOCK_STREAM, 0);
if (i != -1) {
allowed++;
close(i);
}
// try executing new commands
i = system("sleep 0");
if (i != -1) {
allowed++;
}
if (allowed) {
out_string(c, "ERROR");
} else {
out_string(c, "OK");
}
}
#endif
static void process_slabs_automove_command(conn *c, token_t *tokens, const size_t ntokens) {
unsigned int level;
double ratio;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (strcmp(tokens[2].value, "ratio") == 0) {
if (ntokens < 5 || !safe_strtod(tokens[3].value, &ratio)) {
out_string(c, "ERROR");
return;
}
settings.slab_automove_ratio = ratio;
} else {
level = strtoul(tokens[2].value, NULL, 10);
if (level == 0) {
settings.slab_automove = 0;
} else if (level == 1 || level == 2) {
settings.slab_automove = level;
} else {
out_string(c, "ERROR");
return;
}
}
out_string(c, "OK");
return;
}
/* TODO: decide on syntax for sampling? */
static void process_watch_command(conn *c, token_t *tokens, const size_t ntokens) {
uint16_t f = 0;
int x;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (!settings.watch_enabled) {
out_string(c, "CLIENT_ERROR watch commands not allowed");
return;
}
if (ntokens > 2) {
for (x = COMMAND_TOKEN + 1; x < ntokens - 1; x++) {
if ((strcmp(tokens[x].value, "rawcmds") == 0)) {
f |= LOG_RAWCMDS;
} else if ((strcmp(tokens[x].value, "evictions") == 0)) {
f |= LOG_EVICTIONS;
} else if ((strcmp(tokens[x].value, "fetchers") == 0)) {
f |= LOG_FETCHERS;
} else if ((strcmp(tokens[x].value, "mutations") == 0)) {
f |= LOG_MUTATIONS;
} else if ((strcmp(tokens[x].value, "sysevents") == 0)) {
f |= LOG_SYSEVENTS;
} else {
out_string(c, "ERROR");
return;
}
}
} else {
f |= LOG_FETCHERS;
}
switch(logger_add_watcher(c, c->sfd, f)) {
case LOGGER_ADD_WATCHER_TOO_MANY:
out_string(c, "WATCHER_TOO_MANY log watcher limit reached");
break;
case LOGGER_ADD_WATCHER_FAILED:
out_string(c, "WATCHER_FAILED failed to add log watcher");
break;
case LOGGER_ADD_WATCHER_OK:
conn_set_state(c, conn_watch);
event_del(&c->event);
break;
}
}
static void process_memlimit_command(conn *c, token_t *tokens, const size_t ntokens) {
uint32_t memlimit;
assert(c != NULL);
set_noreply_maybe(c, tokens, ntokens);
if (!safe_strtoul(tokens[1].value, &memlimit)) {
out_string(c, "ERROR");
} else {
if (memlimit < 8) {
out_string(c, "MEMLIMIT_TOO_SMALL cannot set maxbytes to less than 8m");
} else {
if (memlimit > 1000000000) {
out_string(c, "MEMLIMIT_ADJUST_FAILED input value is megabytes not bytes");
} else if (slabs_adjust_mem_limit((size_t) memlimit * 1024 * 1024)) {
if (settings.verbose > 0) {
fprintf(stderr, "maxbytes adjusted to %llum\n", (unsigned long long)memlimit);
}
out_string(c, "OK");
} else {
out_string(c, "MEMLIMIT_ADJUST_FAILED out of bounds or unable to adjust");
}
}
}
}
static void process_lru_command(conn *c, token_t *tokens, const size_t ntokens) {
uint32_t pct_hot;
uint32_t pct_warm;
double hot_factor;
int32_t ttl;
double factor;
set_noreply_maybe(c, tokens, ntokens);
if (strcmp(tokens[1].value, "tune") == 0 && ntokens >= 7) {
if (!safe_strtoul(tokens[2].value, &pct_hot) ||
!safe_strtoul(tokens[3].value, &pct_warm) ||
!safe_strtod(tokens[4].value, &hot_factor) ||
!safe_strtod(tokens[5].value, &factor)) {
out_string(c, "ERROR");
} else {
if (pct_hot + pct_warm > 80) {
out_string(c, "ERROR hot and warm pcts must not exceed 80");
} else if (factor <= 0 || hot_factor <= 0) {
out_string(c, "ERROR hot/warm age factors must be greater than 0");
} else {
settings.hot_lru_pct = pct_hot;
settings.warm_lru_pct = pct_warm;
settings.hot_max_factor = hot_factor;
settings.warm_max_factor = factor;
out_string(c, "OK");
}
}
} else if (strcmp(tokens[1].value, "mode") == 0 && ntokens >= 4 &&
settings.lru_maintainer_thread) {
if (strcmp(tokens[2].value, "flat") == 0) {
settings.lru_segmented = false;
out_string(c, "OK");
} else if (strcmp(tokens[2].value, "segmented") == 0) {
settings.lru_segmented = true;
out_string(c, "OK");
} else {
out_string(c, "ERROR");
}
} else if (strcmp(tokens[1].value, "temp_ttl") == 0 && ntokens >= 4 &&
settings.lru_maintainer_thread) {
if (!safe_strtol(tokens[2].value, &ttl)) {
out_string(c, "ERROR");
} else {
if (ttl < 0) {
settings.temp_lru = false;
} else {
settings.temp_lru = true;
settings.temporary_ttl = ttl;
}
out_string(c, "OK");
}
} else {
out_string(c, "ERROR");
}
}
#ifdef EXTSTORE
static void process_extstore_command(conn *c, token_t *tokens, const size_t ntokens) {
set_noreply_maybe(c, tokens, ntokens);
bool ok = true;
if (ntokens < 4) {
ok = false;
} else if (strcmp(tokens[1].value, "free_memchunks") == 0 && ntokens > 4) {
/* per-slab-class free chunk setting. */
unsigned int clsid = 0;
unsigned int limit = 0;
if (!safe_strtoul(tokens[2].value, &clsid) ||
!safe_strtoul(tokens[3].value, &limit)) {
ok = false;
} else {
if (clsid < MAX_NUMBER_OF_SLAB_CLASSES) {
settings.ext_free_memchunks[clsid] = limit;
} else {
ok = false;
}
}
} else if (strcmp(tokens[1].value, "item_size") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_item_size))
ok = false;
} else if (strcmp(tokens[1].value, "item_age") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_item_age))
ok = false;
} else if (strcmp(tokens[1].value, "low_ttl") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_low_ttl))
ok = false;
} else if (strcmp(tokens[1].value, "recache_rate") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_recache_rate))
ok = false;
} else if (strcmp(tokens[1].value, "compact_under") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_compact_under))
ok = false;
} else if (strcmp(tokens[1].value, "drop_under") == 0) {
if (!safe_strtoul(tokens[2].value, &settings.ext_drop_under))
ok = false;
} else if (strcmp(tokens[1].value, "max_frag") == 0) {
if (!safe_strtod(tokens[2].value, &settings.ext_max_frag))
ok = false;
} else if (strcmp(tokens[1].value, "drop_unread") == 0) {
unsigned int v;
if (!safe_strtoul(tokens[2].value, &v)) {
ok = false;
} else {
settings.ext_drop_unread = v == 0 ? false : true;
}
} else {
ok = false;
}
if (!ok) {
out_string(c, "ERROR");
} else {
out_string(c, "OK");
}
}
#endif
// TODO: pipelined commands are incompatible with shifting connections to a
// side thread. Given this only happens in two instances (watch and
// lru_crawler metadump) it should be fine for things to bail. It _should_ be
// unusual for these commands.
// This is hard to fix since tokenize_command() mutilates the read buffer, so
// we can't drop out and back in again.
// Leaving this note here to spend more time on a fix when necessary, or if an
// opportunity becomes obvious.
static void process_command(conn *c, char *command) {
token_t tokens[MAX_TOKENS];
size_t ntokens;
int comm;
assert(c != NULL);
MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
if (settings.verbose > 1)
fprintf(stderr, "<%d %s\n", c->sfd, command);
/*
* for commands set/add/replace, we build an item and read the data
* directly into it, then continue in nread_complete().
*/
// Prep the response object for this query.
if (!resp_start(c)) {
conn_set_state(c, conn_closing);
return;
}
ntokens = tokenize_command(command, tokens, MAX_TOKENS);
if (ntokens >= 3 &&
((strcmp(tokens[COMMAND_TOKEN].value, "get") == 0) ||
(strcmp(tokens[COMMAND_TOKEN].value, "bget") == 0))) {
process_get_command(c, tokens, ntokens, false, false);
} else if ((ntokens == 6 || ntokens == 7) &&
((strcmp(tokens[COMMAND_TOKEN].value, "add") == 0 && (comm = NREAD_ADD)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "set") == 0 && (comm = NREAD_SET)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "replace") == 0 && (comm = NREAD_REPLACE)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "prepend") == 0 && (comm = NREAD_PREPEND)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "append") == 0 && (comm = NREAD_APPEND)) )) {
process_update_command(c, tokens, ntokens, comm, false);
} else if ((ntokens == 7 || ntokens == 8) && (strcmp(tokens[COMMAND_TOKEN].value, "cas") == 0 && (comm = NREAD_CAS))) {
process_update_command(c, tokens, ntokens, comm, true);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "incr") == 0)) {
process_arithmetic_command(c, tokens, ntokens, 1);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "gets") == 0)) {
process_get_command(c, tokens, ntokens, true, false);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "mg") == 0)) {
process_mget_command(c, tokens, ntokens);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "ms") == 0)) {
process_mset_command(c, tokens, ntokens);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "md") == 0)) {
process_mdelete_command(c, tokens, ntokens);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "mn") == 0)) {
out_string(c, "MN");
// mn command forces immediate writeback flush.
conn_set_state(c, conn_mwrite);
return;
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "ma") == 0)) {
process_marithmetic_command(c, tokens, ntokens);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "me") == 0)) {
process_meta_command(c, tokens, ntokens);
return;
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "decr") == 0)) {
process_arithmetic_command(c, tokens, ntokens, 0);
} else if (ntokens >= 3 && ntokens <= 5 && (strcmp(tokens[COMMAND_TOKEN].value, "delete") == 0)) {
process_delete_command(c, tokens, ntokens);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "touch") == 0)) {
process_touch_command(c, tokens, ntokens);
} else if (ntokens >= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "gat") == 0)) {
process_get_command(c, tokens, ntokens, false, true);
} else if (ntokens >= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "gats") == 0)) {
process_get_command(c, tokens, ntokens, true, true);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "stats") == 0)) {
process_stat(c, tokens, ntokens);
} else if (ntokens >= 2 && ntokens <= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "flush_all") == 0)) {
time_t exptime = 0;
rel_time_t new_oldest = 0;
set_noreply_maybe(c, tokens, ntokens);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.flush_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (!settings.flush_enabled) {
// flush_all is not allowed but we log it on stats
out_string(c, "CLIENT_ERROR flush_all not allowed");
return;
}
if (ntokens != (c->noreply ? 3 : 2)) {
exptime = strtol(tokens[1].value, NULL, 10);
if(errno == ERANGE) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
}
/*
If exptime is zero realtime() would return zero too, and
realtime(exptime) - 1 would overflow to the max unsigned
value. So we process exptime == 0 the same way we do when
no delay is given at all.
*/
if (exptime > 0) {
new_oldest = realtime(exptime);
} else { /* exptime == 0 */
new_oldest = current_time;
}
if (settings.use_cas) {
settings.oldest_live = new_oldest - 1;
if (settings.oldest_live <= current_time)
settings.oldest_cas = get_cas_id();
} else {
settings.oldest_live = new_oldest;
}
out_string(c, "OK");
return;
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "version") == 0)) {
out_string(c, "VERSION " VERSION);
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "quit") == 0)) {
conn_set_state(c, conn_mwrite);
c->close_after_write = true;
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "shutdown") == 0)) {
if (settings.shutdown_command) {
conn_set_state(c, conn_closing);
raise(SIGINT);
} else {
out_string(c, "ERROR: shutdown not enabled");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "slabs") == 0) {
if (ntokens == 5 && strcmp(tokens[COMMAND_TOKEN + 1].value, "reassign") == 0) {
int src, dst, rv;
if (settings.slab_reassign == false) {
out_string(c, "CLIENT_ERROR slab reassignment disabled");
return;
}
src = strtol(tokens[2].value, NULL, 10);
dst = strtol(tokens[3].value, NULL, 10);
if (errno == ERANGE) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
rv = slabs_reassign(src, dst);
switch (rv) {
case REASSIGN_OK:
out_string(c, "OK");
break;
case REASSIGN_RUNNING:
out_string(c, "BUSY currently processing reassign request");
break;
case REASSIGN_BADCLASS:
out_string(c, "BADCLASS invalid src or dst class id");
break;
case REASSIGN_NOSPARE:
out_string(c, "NOSPARE source class has no spare pages");
break;
case REASSIGN_SRC_DST_SAME:
out_string(c, "SAME src and dst class are identical");
break;
}
return;
} else if (ntokens >= 4 &&
(strcmp(tokens[COMMAND_TOKEN + 1].value, "automove") == 0)) {
process_slabs_automove_command(c, tokens, ntokens);
} else {
out_string(c, "ERROR");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "lru_crawler") == 0) {
if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "crawl") == 0) {
int rv;
if (settings.lru_crawler == false) {
out_string(c, "CLIENT_ERROR lru crawler disabled");
return;
}
rv = lru_crawler_crawl(tokens[2].value, CRAWLER_EXPIRED, NULL, 0,
settings.lru_crawler_tocrawl);
switch(rv) {
case CRAWLER_OK:
out_string(c, "OK");
break;
case CRAWLER_RUNNING:
out_string(c, "BUSY currently processing crawler request");
break;
case CRAWLER_BADCLASS:
out_string(c, "BADCLASS invalid class id");
break;
case CRAWLER_NOTSTARTED:
out_string(c, "NOTSTARTED no items to crawl");
break;
case CRAWLER_ERROR:
out_string(c, "ERROR an unknown error happened");
break;
}
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "metadump") == 0) {
if (settings.lru_crawler == false) {
out_string(c, "CLIENT_ERROR lru crawler disabled");
return;
}
if (!settings.dump_enabled) {
out_string(c, "ERROR metadump not allowed");
return;
}
if (resp_has_stack(c)) {
out_string(c, "ERROR cannot pipeline other commands before metadump");
return;
}
int rv = lru_crawler_crawl(tokens[2].value, CRAWLER_METADUMP,
c, c->sfd, LRU_CRAWLER_CAP_REMAINING);
switch(rv) {
case CRAWLER_OK:
// TODO: documentation says this string is returned, but
// it never was before. We never switch to conn_write so
// this o_s call never worked. Need to talk to users and
// decide if removing the OK from docs is fine.
//out_string(c, "OK");
// TODO: Don't reuse conn_watch here.
conn_set_state(c, conn_watch);
event_del(&c->event);
break;
case CRAWLER_RUNNING:
out_string(c, "BUSY currently processing crawler request");
break;
case CRAWLER_BADCLASS:
out_string(c, "BADCLASS invalid class id");
break;
case CRAWLER_NOTSTARTED:
out_string(c, "NOTSTARTED no items to crawl");
break;
case CRAWLER_ERROR:
out_string(c, "ERROR an unknown error happened");
break;
}
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "tocrawl") == 0) {
uint32_t tocrawl;
if (!safe_strtoul(tokens[2].value, &tocrawl)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
settings.lru_crawler_tocrawl = tocrawl;
out_string(c, "OK");
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "sleep") == 0) {
uint32_t tosleep;
if (!safe_strtoul(tokens[2].value, &tosleep)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
if (tosleep > 1000000) {
out_string(c, "CLIENT_ERROR sleep must be one second or less");
return;
}
settings.lru_crawler_sleep = tosleep;
out_string(c, "OK");
return;
} else if (ntokens == 3) {
if ((strcmp(tokens[COMMAND_TOKEN + 1].value, "enable") == 0)) {
if (start_item_crawler_thread() == 0) {
out_string(c, "OK");
} else {
out_string(c, "ERROR failed to start lru crawler thread");
}
} else if ((strcmp(tokens[COMMAND_TOKEN + 1].value, "disable") == 0)) {
if (stop_item_crawler_thread(CRAWLER_NOWAIT) == 0) {
out_string(c, "OK");
} else {
out_string(c, "ERROR failed to stop lru crawler thread");
}
} else {
out_string(c, "ERROR");
}
return;
} else {
out_string(c, "ERROR");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "watch") == 0) {
if (resp_has_stack(c)) {
out_string(c, "ERROR cannot pipeline other commands before watch");
return;
}
process_watch_command(c, tokens, ntokens);
} else if ((ntokens == 3 || ntokens == 4) && (strcmp(tokens[COMMAND_TOKEN].value, "cache_memlimit") == 0)) {
process_memlimit_command(c, tokens, ntokens);
} else if ((ntokens == 3 || ntokens == 4) && (strcmp(tokens[COMMAND_TOKEN].value, "verbosity") == 0)) {
process_verbosity_command(c, tokens, ntokens);
} else if (ntokens >= 3 && strcmp(tokens[COMMAND_TOKEN].value, "lru") == 0) {
process_lru_command(c, tokens, ntokens);
#ifdef MEMCACHED_DEBUG
// commands which exist only for testing the memcached's security protection
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "misbehave") == 0)) {
process_misbehave_command(c);
#endif
#ifdef EXTSTORE
} else if (ntokens >= 3 && strcmp(tokens[COMMAND_TOKEN].value, "extstore") == 0) {
process_extstore_command(c, tokens, ntokens);
#endif
#ifdef TLS
} else if (ntokens == 2 && strcmp(tokens[COMMAND_TOKEN].value, "refresh_certs") == 0) {
set_noreply_maybe(c, tokens, ntokens);
char *errmsg = NULL;
if (refresh_certs(&errmsg)) {
out_string(c, "OK");
} else {
write_and_free(c, errmsg, strlen(errmsg));
}
return;
#endif
} else {
if (ntokens >= 2 && strncmp(tokens[ntokens - 2].value, "HTTP/", 5) == 0) {
conn_set_state(c, conn_closing);
} else {
out_string(c, "ERROR");
}
}
return;
}
static int try_read_command_negotiate(conn *c) {
assert(c->protocol == negotiating_prot);
assert(c != NULL);
assert(c->rcurr <= (c->rbuf + c->rsize));
assert(c->rbytes > 0);
if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) {
c->protocol = binary_prot;
c->try_read_command = try_read_command_binary;
} else {
// authentication doesn't work with negotiated protocol.
c->protocol = ascii_prot;
c->try_read_command = try_read_command_ascii;
}
if (settings.verbose > 1) {
fprintf(stderr, "%d: Client using the %s protocol\n", c->sfd,
prot_text(c->protocol));
}
return c->try_read_command(c);
}
static int try_read_command_udp(conn *c) {
assert(c != NULL);
assert(c->rcurr <= (c->rbuf + c->rsize));
assert(c->rbytes > 0);
if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) {
c->protocol = binary_prot;
return try_read_command_binary(c);
} else {
c->protocol = ascii_prot;
return try_read_command_ascii(c);
}
}
static int try_read_command_binary(conn *c) {
/* Do we have the complete packet header? */
if (c->rbytes < sizeof(c->binary_header)) {
/* need more data! */
return 0;
} else {
memcpy(&c->binary_header, c->rcurr, sizeof(c->binary_header));
protocol_binary_request_header* req;
req = &c->binary_header;
if (settings.verbose > 1) {
/* Dump the packet before we convert it to host order */
int ii;
fprintf(stderr, "<%d Read binary protocol data:", c->sfd);
for (ii = 0; ii < sizeof(req->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n<%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", req->bytes[ii]);
}
fprintf(stderr, "\n");
}
c->binary_header = *req;
c->binary_header.request.keylen = ntohs(req->request.keylen);
c->binary_header.request.bodylen = ntohl(req->request.bodylen);
c->binary_header.request.cas = ntohll(req->request.cas);
if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ) {
if (settings.verbose) {
fprintf(stderr, "Invalid magic: %x\n",
c->binary_header.request.magic);
}
conn_set_state(c, conn_closing);
return -1;
}
uint8_t extlen = c->binary_header.request.extlen;
uint16_t keylen = c->binary_header.request.keylen;
if (c->rbytes < keylen + extlen + sizeof(c->binary_header)) {
// Still need more bytes. Let try_read_network() realign the
// read-buffer and fetch more data as necessary.
return 0;
}
if (!resp_start(c)) {
conn_set_state(c, conn_closing);
return -1;
}
c->cmd = c->binary_header.request.opcode;
c->keylen = c->binary_header.request.keylen;
c->opaque = c->binary_header.request.opaque;
/* clear the returned cas value */
c->cas = 0;
c->last_cmd_time = current_time;
// sigh. binprot has no "largest possible extlen" define, and I don't
// want to refactor a ton of code either. Header is only ever used out
// of c->binary_header, but the extlen stuff is used for the latter
// bytes. Just wastes 24 bytes on the stack this way.
char extbuf[sizeof(c->binary_header) + BIN_MAX_EXTLEN];
memcpy(extbuf + sizeof(c->binary_header), c->rcurr + sizeof(c->binary_header), extlen);
c->rbytes -= sizeof(c->binary_header) + extlen + keylen;
c->rcurr += sizeof(c->binary_header) + extlen + keylen;
dispatch_bin_command(c, extbuf);
}
return 1;
}
static int try_read_command_asciiauth(conn *c) {
token_t tokens[MAX_TOKENS];
size_t ntokens;
char *cont = NULL;
if (!c->resp) {
if (!resp_start(c)) {
conn_set_state(c, conn_closing);
return 1;
}
}
// TODO: move to another function.
if (!c->sasl_started) {
char *el;
uint32_t size = 0;
// impossible for the auth command to be this short.
if (c->rbytes < 2)
return 0;
el = memchr(c->rcurr, '\n', c->rbytes);
// If no newline after 1k, getting junk data, close out.
if (!el) {
if (c->rbytes > 1024) {
conn_set_state(c, conn_closing);
return 1;
}
return 0;
}
// Looking for: "set foo 0 0 N\r\nuser pass\r\n"
// key, flags, and ttl are ignored. N is used to see if we have the rest.
// so tokenize doesn't walk past into the value.
// it's fine to leave the \r in, as strtoul will stop at it.
*el = '\0';
ntokens = tokenize_command(c->rcurr, tokens, MAX_TOKENS);
// ensure the buffer is consumed.
c->rbytes -= (el - c->rcurr) + 1;
c->rcurr += (el - c->rcurr) + 1;
// final token is a NULL ender, so we have one more than expected.
if (ntokens < 6
|| strcmp(tokens[0].value, "set") != 0
|| !safe_strtoul(tokens[4].value, &size)) {
out_string(c, "CLIENT_ERROR unauthenticated");
return 1;
}
// we don't actually care about the key at all; it can be anything.
// we do care about the size of the remaining read.
c->rlbytes = size + 2;
c->sasl_started = true; // reuse from binprot sasl, but not sasl :)
}
if (c->rbytes < c->rlbytes) {
// need more bytes.
return 0;
}
cont = c->rcurr;
// advance buffer. no matter what we're stopping.
c->rbytes -= c->rlbytes;
c->rcurr += c->rlbytes;
c->sasl_started = false;
// must end with \r\n
// NB: I thought ASCII sets also worked with just \n, but according to
// complete_nread_ascii only \r\n is valid.
if (strncmp(cont + c->rlbytes - 2, "\r\n", 2) != 0) {
out_string(c, "CLIENT_ERROR bad command line termination");
return 1;
}
// payload should be "user pass", so we can use the tokenizer.
cont[c->rlbytes - 2] = '\0';
ntokens = tokenize_command(cont, tokens, MAX_TOKENS);
if (ntokens < 3) {
out_string(c, "CLIENT_ERROR bad authentication token format");
return 1;
}
if (authfile_check(tokens[0].value, tokens[1].value) == 1) {
out_string(c, "STORED");
c->authenticated = true;
c->try_read_command = try_read_command_ascii;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
} else {
out_string(c, "CLIENT_ERROR authentication failure");
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.auth_cmds++;
c->thread->stats.auth_errors++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
return 1;
}
static int try_read_command_ascii(conn *c) {
char *el, *cont;
if (c->rbytes == 0)
return 0;
el = memchr(c->rcurr, '\n', c->rbytes);
if (!el) {
if (c->rbytes > 1024) {
/*
* We didn't have a '\n' in the first k. This _has_ to be a
* large multiget, if not we should just nuke the connection.
*/
char *ptr = c->rcurr;
while (*ptr == ' ') { /* ignore leading whitespaces */
++ptr;
}
if (ptr - c->rcurr > 100 ||
(strncmp(ptr, "get ", 4) && strncmp(ptr, "gets ", 5))) {
conn_set_state(c, conn_closing);
return 1;
}
// ASCII multigets are unbound, so our fixed size rbuf may not
// work for this particular workload... For backcompat we'll use a
// malloc/realloc/free routine just for this.
if (!c->rbuf_malloced) {
if (!rbuf_switch_to_malloc(c)) {
conn_set_state(c, conn_closing);
return 1;
}
}
}
return 0;
}
cont = el + 1;
if ((el - c->rcurr) > 1 && *(el - 1) == '\r') {
el--;
}
*el = '\0';
assert(cont <= (c->rcurr + c->rbytes));
c->last_cmd_time = current_time;
process_command(c, c->rcurr);
c->rbytes -= (cont - c->rcurr);
c->rcurr = cont;
assert(c->rcurr <= (c->rbuf + c->rsize));
return 1;
}
/*
* read a UDP request.
*/
static enum try_read_result try_read_udp(conn *c) {
int res;
assert(c != NULL);
c->request_addr_size = sizeof(c->request_addr);
res = recvfrom(c->sfd, c->rbuf, c->rsize,
0, (struct sockaddr *)&c->request_addr,
&c->request_addr_size);
if (res > 8) {
unsigned char *buf = (unsigned char *)c->rbuf;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
/* Beginning of UDP packet is the request ID; save it. */
c->request_id = buf[0] * 256 + buf[1];
/* If this is a multi-packet request, drop it. */
if (buf[4] != 0 || buf[5] != 1) {
out_string(c, "SERVER_ERROR multi-packet request not supported");
return READ_NO_DATA_RECEIVED;
}
/* Don't care about any of the rest of the header. */
res -= 8;
memmove(c->rbuf, c->rbuf + 8, res);
c->rbytes = res;
c->rcurr = c->rbuf;
return READ_DATA_RECEIVED;
}
return READ_NO_DATA_RECEIVED;
}
/*
* read from network as much as we can, handle buffer overflow and connection
* close.
* before reading, move the remaining incomplete fragment of a command
* (if any) to the beginning of the buffer.
*
* To protect us from someone flooding a connection with bogus data causing
* the connection to eat up all available memory, break out and start looking
* at the data I've got after a number of reallocs...
*
* @return enum try_read_result
*/
static enum try_read_result try_read_network(conn *c) {
enum try_read_result gotdata = READ_NO_DATA_RECEIVED;
int res;
int num_allocs = 0;
assert(c != NULL);
if (c->rcurr != c->rbuf) {
if (c->rbytes != 0) /* otherwise there's nothing to copy */
memmove(c->rbuf, c->rcurr, c->rbytes);
c->rcurr = c->rbuf;
}
while (1) {
// TODO: move to rbuf_* func?
if (c->rbytes >= c->rsize && c->rbuf_malloced) {
if (num_allocs == 4) {
return gotdata;
}
++num_allocs;
char *new_rbuf = realloc(c->rbuf, c->rsize * 2);
if (!new_rbuf) {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
if (settings.verbose > 0) {
fprintf(stderr, "Couldn't realloc input buffer\n");
}
c->rbytes = 0; /* ignore what we read */
out_of_memory(c, "SERVER_ERROR out of memory reading request");
c->close_after_write = true;
return READ_MEMORY_ERROR;
}
c->rcurr = c->rbuf = new_rbuf;
c->rsize *= 2;
}
int avail = c->rsize - c->rbytes;
res = c->read(c, c->rbuf + c->rbytes, avail);
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
gotdata = READ_DATA_RECEIVED;
c->rbytes += res;
if (res == avail && c->rbuf_malloced) {
// Resize rbuf and try a few times if huge ascii multiget.
continue;
} else {
break;
}
}
if (res == 0) {
return READ_ERROR;
}
if (res == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
break;
}
return READ_ERROR;
}
}
return gotdata;
}
static bool update_event(conn *c, const int new_flags) {
assert(c != NULL);
struct event_base *base = c->event.ev_base;
if (c->ev_flags == new_flags)
return true;
if (event_del(&c->event) == -1) return false;
event_set(&c->event, c->sfd, new_flags, event_handler, (void *)c);
event_base_set(base, &c->event);
c->ev_flags = new_flags;
if (event_add(&c->event, 0) == -1) return false;
return true;
}
/*
* Sets whether we are listening for new connections or not.
*/
void do_accept_new_conns(const bool do_accept) {
conn *next;
for (next = listen_conn; next; next = next->next) {
if (do_accept) {
update_event(next, EV_READ | EV_PERSIST);
if (listen(next->sfd, settings.backlog) != 0) {
perror("listen");
}
}
else {
update_event(next, 0);
if (listen(next->sfd, 0) != 0) {
perror("listen");
}
}
}
if (do_accept) {
struct timeval maxconns_exited;
uint64_t elapsed_us;
gettimeofday(&maxconns_exited,NULL);
STATS_LOCK();
elapsed_us =
(maxconns_exited.tv_sec - stats.maxconns_entered.tv_sec) * 1000000
+ (maxconns_exited.tv_usec - stats.maxconns_entered.tv_usec);
stats.time_in_listen_disabled_us += elapsed_us;
stats_state.accepting_conns = true;
STATS_UNLOCK();
} else {
STATS_LOCK();
stats_state.accepting_conns = false;
gettimeofday(&stats.maxconns_entered,NULL);
stats.listen_disabled_num++;
STATS_UNLOCK();
allow_new_conns = false;
maxconns_handler(-42, 0, 0);
}
}
#define TRANSMIT_ONE_RESP true
#define TRANSMIT_ALL_RESP false
static int _transmit_pre(conn *c, struct iovec *iovs, int iovused, bool one_resp) {
mc_resp *resp = c->resp_head;
while (resp && iovused + resp->iovcnt < IOV_MAX-1) {
if (resp->skip) {
// Don't actually unchain the resp obj here since it's singly-linked.
// Just let the post function handle it linearly.
resp = resp->next;
continue;
}
if (resp->chunked_data_iov) {
// Handle chunked items specially.
// They spend much more time in send so we can be a bit wasteful
// in rebuilding iovecs for them.
item_chunk *ch = (item_chunk *)ITEM_schunk((item *)resp->iov[resp->chunked_data_iov].iov_base);
int x;
for (x = 0; x < resp->iovcnt; x++) {
// This iov is tracking how far we've copied so far.
if (x == resp->chunked_data_iov) {
int done = resp->chunked_total - resp->iov[x].iov_len;
// Start from the len to allow binprot to cut the \r\n
int todo = resp->iov[x].iov_len;
while (ch && todo > 0 && iovused < IOV_MAX-1) {
int skip = 0;
if (!ch->used) {
ch = ch->next;
continue;
}
// Skip parts we've already sent.
if (done >= ch->used) {
done -= ch->used;
ch = ch->next;
continue;
} else if (done) {
skip = done;
done = 0;
}
iovs[iovused].iov_base = ch->data + skip;
// Stupid binary protocol makes this go negative.
iovs[iovused].iov_len = ch->used - skip > todo ? todo : ch->used - skip;
iovused++;
todo -= ch->used - skip;
ch = ch->next;
}
} else {
iovs[iovused].iov_base = resp->iov[x].iov_base;
iovs[iovused].iov_len = resp->iov[x].iov_len;
iovused++;
}
if (iovused >= IOV_MAX-1)
break;
}
} else {
memcpy(&iovs[iovused], resp->iov, sizeof(struct iovec)*resp->iovcnt);
iovused += resp->iovcnt;
}
// done looking at first response, walk down the chain.
resp = resp->next;
// used for UDP mode: UDP cannot send multiple responses per packet.
if (one_resp)
break;
}
return iovused;
}
/*
* Decrements and completes responses based on how much data was transmitted.
* Takes the connection and current result bytes.
*/
static void _transmit_post(conn *c, ssize_t res) {
// We've written some of the data. Remove the completed
// responses from the list of pending writes.
mc_resp *resp = c->resp_head;
while (resp) {
int x;
if (resp->skip) {
resp = resp_finish(c, resp);
continue;
}
// fastpath check. all small responses should cut here.
if (res >= resp->tosend) {
res -= resp->tosend;
resp = resp_finish(c, resp);
continue;
}
// it's fine to re-check iov's that were zeroed out before.
for (x = 0; x < resp->iovcnt; x++) {
struct iovec *iov = &resp->iov[x];
if (res >= iov->iov_len) {
resp->tosend -= iov->iov_len;
res -= iov->iov_len;
iov->iov_len = 0;
} else {
// Dumb special case for chunked items. Currently tracking
// where to inject the chunked item via iov_base.
// Extra not-great since chunked items can't be the first
// index, so we have to check for non-zero c_d_iov first.
if (!resp->chunked_data_iov || x != resp->chunked_data_iov) {
iov->iov_base = (char *)iov->iov_base + res;
}
iov->iov_len -= res;
resp->tosend -= res;
res = 0;
break;
}
}
// are we done with this response object?
if (resp->tosend == 0) {
resp = resp_finish(c, resp);
} else {
// Jammed up here. This is the new head.
break;
}
}
}
/*
* Transmit the next chunk of data from our list of msgbuf structures.
*
* Returns:
* TRANSMIT_COMPLETE All done writing.
* TRANSMIT_INCOMPLETE More data remaining to write.
* TRANSMIT_SOFT_ERROR Can't write any more right now.
* TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
*/
static enum transmit_result transmit(conn *c) {
assert(c != NULL);
struct iovec iovs[IOV_MAX];
struct msghdr msg;
int iovused = 0;
// init the msg.
memset(&msg, 0, sizeof(struct msghdr));
msg.msg_iov = iovs;
iovused = _transmit_pre(c, iovs, iovused, TRANSMIT_ALL_RESP);
// Alright, send.
ssize_t res;
msg.msg_iovlen = iovused;
res = c->sendmsg(c, &msg, 0);
if (res >= 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_written += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
// Decrement any partial IOV's and complete any finished resp's.
_transmit_post(c, res);
if (c->resp_head) {
return TRANSMIT_INCOMPLETE;
} else {
return TRANSMIT_COMPLETE;
}
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_WRITE | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
return TRANSMIT_HARD_ERROR;
}
return TRANSMIT_SOFT_ERROR;
}
/* if res == -1 and error is not EAGAIN or EWOULDBLOCK,
we have a real error, on which we close the connection */
if (settings.verbose > 0)
perror("Failed to write, and not due to blocking");
conn_set_state(c, conn_closing);
return TRANSMIT_HARD_ERROR;
}
static void build_udp_header(unsigned char *hdr, mc_resp *resp) {
// We need to communicate the total number of packets
// If this isn't set, it's the first time this response is building a udp
// header, so "tosend" must be static.
if (!resp->udp_total) {
uint32_t total;
total = resp->tosend / UDP_MAX_PAYLOAD_SIZE;
if (resp->tosend % UDP_MAX_PAYLOAD_SIZE)
total++;
// The spec doesn't really say what we should do here. It's _probably_
// better to bail out?
if (total > USHRT_MAX) {
total = USHRT_MAX;
}
resp->udp_total = total;
}
// TODO: why wasn't this hto*'s and casts?
// this ends up sending UDP hdr data specifically in host byte order.
*hdr++ = resp->request_id / 256;
*hdr++ = resp->request_id % 256;
*hdr++ = resp->udp_sequence / 256;
*hdr++ = resp->udp_sequence % 256;
*hdr++ = resp->udp_total / 256;
*hdr++ = resp->udp_total % 256;
*hdr++ = 0;
*hdr++ = 0;
resp->udp_sequence++;
}
/*
* UDP specific transmit function. Uses its own function rather than check
* IS_UDP() five times. If we ever implement sendmmsg or similar support they
* will diverge even more.
* Does not use TLS.
*
* Returns:
* TRANSMIT_COMPLETE All done writing.
* TRANSMIT_INCOMPLETE More data remaining to write.
* TRANSMIT_SOFT_ERROR Can't write any more right now.
* TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
*/
static enum transmit_result transmit_udp(conn *c) {
assert(c != NULL);
struct iovec iovs[IOV_MAX];
struct msghdr msg;
mc_resp *resp;
int iovused = 0;
unsigned char udp_hdr[UDP_HEADER_SIZE];
// We only send one UDP packet per call (ugh), so we can only operate on a
// single response at a time.
resp = c->resp_head;
if (!resp) {
return TRANSMIT_COMPLETE;
}
if (resp->skip) {
resp = resp_finish(c, resp);
return TRANSMIT_INCOMPLETE;
}
// clear the message and initialize it.
memset(&msg, 0, sizeof(struct msghdr));
msg.msg_iov = iovs;
// the UDP source to return to.
msg.msg_name = &resp->request_addr;
msg.msg_namelen = resp->request_addr_size;
// First IOV is the custom UDP header.
iovs[0].iov_base = udp_hdr;
iovs[0].iov_len = UDP_HEADER_SIZE;
build_udp_header(udp_hdr, resp);
iovused++;
// Fill the IOV's the standard way.
// TODO: might get a small speedup if we let it break early with a length
// limit.
iovused = _transmit_pre(c, iovs, iovused, TRANSMIT_ONE_RESP);
// Clip the IOV's to the max UDP packet size.
// If we add support for send_mmsg, this can be where we split msg's.
{
int x = 0;
int len = 0;
for (x = 0; x < iovused; x++) {
if (len + iovs[x].iov_len >= UDP_MAX_PAYLOAD_SIZE) {
iovs[x].iov_len = UDP_MAX_PAYLOAD_SIZE - len;
x++;
break;
} else {
len += iovs[x].iov_len;
}
}
iovused = x;
}
ssize_t res;
msg.msg_iovlen = iovused;
// NOTE: uses system sendmsg since we have no support for indirect UDP.
res = sendmsg(c->sfd, &msg, 0);
if (res >= 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_written += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
// Ignore the header size from forwarding the IOV's
res -= UDP_HEADER_SIZE;
// Decrement any partial IOV's and complete any finished resp's.
_transmit_post(c, res);
if (c->resp_head) {
return TRANSMIT_INCOMPLETE;
} else {
return TRANSMIT_COMPLETE;
}
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_WRITE | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
return TRANSMIT_HARD_ERROR;
}
return TRANSMIT_SOFT_ERROR;
}
/* if res == -1 and error is not EAGAIN or EWOULDBLOCK,
we have a real error, on which we close the connection */
if (settings.verbose > 0)
perror("Failed to write, and not due to blocking");
conn_set_state(c, conn_read);
return TRANSMIT_HARD_ERROR;
}
/* Does a looped read to fill data chunks */
/* TODO: restrict number of times this can loop.
* Also, benchmark using readv's.
*/
static int read_into_chunked_item(conn *c) {
int total = 0;
int res;
assert(c->rcurr != c->ritem);
while (c->rlbytes > 0) {
item_chunk *ch = (item_chunk *)c->ritem;
if (ch->size == ch->used) {
// FIXME: ch->next is currently always 0. remove this?
if (ch->next) {
c->ritem = (char *) ch->next;
} else {
/* Allocate next chunk. Binary protocol needs 2b for \r\n */
c->ritem = (char *) do_item_alloc_chunk(ch, c->rlbytes +
((c->protocol == binary_prot) ? 2 : 0));
if (!c->ritem) {
// We failed an allocation. Let caller handle cleanup.
total = -2;
break;
}
// ritem has new chunk, restart the loop.
continue;
//assert(c->rlbytes == 0);
}
}
int unused = ch->size - ch->used;
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
total = 0;
int tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
tocopy = tocopy > unused ? unused : tocopy;
if (c->ritem != c->rcurr) {
memmove(ch->data + ch->used, c->rcurr, tocopy);
}
total += tocopy;
c->rlbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
ch->used += tocopy;
if (c->rlbytes == 0) {
break;
}
} else {
/* now try reading from the socket */
res = c->read(c, ch->data + ch->used,
(unused > c->rlbytes ? c->rlbytes : unused));
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
ch->used += res;
total += res;
c->rlbytes -= res;
} else {
/* Reset total to the latest result so caller can handle it */
total = res;
break;
}
}
}
/* At some point I will be able to ditch the \r\n from item storage and
remove all of these kludges.
The above binprot check ensures inline space for \r\n, but if we do
exactly enough allocs there will be no additional chunk for \r\n.
*/
if (c->rlbytes == 0 && c->protocol == binary_prot && total >= 0) {
item_chunk *ch = (item_chunk *)c->ritem;
if (ch->size - ch->used < 2) {
c->ritem = (char *) do_item_alloc_chunk(ch, 2);
if (!c->ritem) {
total = -2;
}
}
}
return total;
}
static void drive_machine(conn *c) {
bool stop = false;
int sfd;
socklen_t addrlen;
struct sockaddr_storage addr;
int nreqs = settings.reqs_per_event;
int res;
const char *str;
#ifdef HAVE_ACCEPT4
static int use_accept4 = 1;
#else
static int use_accept4 = 0;
#endif
assert(c != NULL);
while (!stop) {
switch(c->state) {
case conn_listening:
addrlen = sizeof(addr);
#ifdef HAVE_ACCEPT4
if (use_accept4) {
sfd = accept4(c->sfd, (struct sockaddr *)&addr, &addrlen, SOCK_NONBLOCK);
} else {
sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen);
}
#else
sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen);
#endif
if (sfd == -1) {
if (use_accept4 && errno == ENOSYS) {
use_accept4 = 0;
continue;
}
perror(use_accept4 ? "accept4()" : "accept()");
if (errno == EAGAIN || errno == EWOULDBLOCK) {
/* these are transient, so don't log anything */
stop = true;
} else if (errno == EMFILE) {
if (settings.verbose > 0)
fprintf(stderr, "Too many open connections\n");
accept_new_conns(false);
stop = true;
} else {
perror("accept()");
stop = true;
}
break;
}
if (!use_accept4) {
if (fcntl(sfd, F_SETFL, fcntl(sfd, F_GETFL) | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
break;
}
}
bool reject;
if (settings.maxconns_fast) {
STATS_LOCK();
reject = stats_state.curr_conns + stats_state.reserved_fds >= settings.maxconns - 1;
if (reject) {
stats.rejected_conns++;
}
STATS_UNLOCK();
} else {
reject = false;
}
if (reject) {
str = "ERROR Too many open connections\r\n";
res = write(sfd, str, strlen(str));
close(sfd);
} else {
void *ssl_v = NULL;
#ifdef TLS
SSL *ssl = NULL;
if (c->ssl_enabled) {
assert(IS_TCP(c->transport) && settings.ssl_enabled);
if (settings.ssl_ctx == NULL) {
if (settings.verbose) {
fprintf(stderr, "SSL context is not initialized\n");
}
close(sfd);
break;
}
SSL_LOCK();
ssl = SSL_new(settings.ssl_ctx);
SSL_UNLOCK();
if (ssl == NULL) {
if (settings.verbose) {
fprintf(stderr, "Failed to created the SSL object\n");
}
close(sfd);
break;
}
SSL_set_fd(ssl, sfd);
int ret = SSL_accept(ssl);
if (ret <= 0) {
int err = SSL_get_error(ssl, ret);
if (err == SSL_ERROR_SYSCALL || err == SSL_ERROR_SSL) {
if (settings.verbose) {
fprintf(stderr, "SSL connection failed with error code : %d : %s\n", err, strerror(errno));
}
SSL_free(ssl);
close(sfd);
STATS_LOCK();
stats.ssl_handshake_errors++;
STATS_UNLOCK();
break;
}
}
}
ssl_v = (void*) ssl;
#endif
dispatch_conn_new(sfd, conn_new_cmd, EV_READ | EV_PERSIST,
READ_BUFFER_CACHED, c->transport, ssl_v);
}
stop = true;
break;
case conn_waiting:
rbuf_release(c);
if (!update_event(c, EV_READ | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
conn_set_state(c, conn_read);
stop = true;
break;
case conn_read:
if (!IS_UDP(c->transport)) {
// Assign a read buffer if necessary.
if (!rbuf_alloc(c)) {
// TODO: Some way to allow for temporary failures.
conn_set_state(c, conn_closing);
break;
}
res = try_read_network(c);
} else {
// UDP connections always have a static buffer.
res = try_read_udp(c);
}
switch (res) {
case READ_NO_DATA_RECEIVED:
conn_set_state(c, conn_waiting);
break;
case READ_DATA_RECEIVED:
conn_set_state(c, conn_parse_cmd);
break;
case READ_ERROR:
conn_set_state(c, conn_closing);
break;
case READ_MEMORY_ERROR: /* Failed to allocate more memory */
/* State already set by try_read_network */
break;
}
break;
case conn_parse_cmd:
c->noreply = false;
if (c->try_read_command(c) == 0) {
/* wee need more data! */
if (c->resp_head) {
// Buffered responses waiting, flush in the meantime.
conn_set_state(c, conn_mwrite);
} else {
conn_set_state(c, conn_waiting);
}
}
break;
case conn_new_cmd:
/* Only process nreqs at a time to avoid starving other
connections */
--nreqs;
if (nreqs >= 0) {
reset_cmd_handler(c);
} else if (c->resp_head) {
// flush response pipe on yield.
conn_set_state(c, conn_mwrite);
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.conn_yields++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (c->rbytes > 0) {
/* We have already read in data into the input buffer,
so libevent will most likely not signal read events
on the socket (unless more data is available. As a
hack we should just put in a request to write data,
because that should be possible ;-)
*/
if (!update_event(c, EV_WRITE | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
}
stop = true;
}
break;
case conn_nread:
if (c->rlbytes == 0) {
complete_nread(c);
break;
}
/* Check if rbytes < 0, to prevent crash */
if (c->rlbytes < 0) {
if (settings.verbose) {
fprintf(stderr, "Invalid rlbytes to read: len %d\n", c->rlbytes);
}
conn_set_state(c, conn_closing);
break;
}
if ((((item *)c->item)->it_flags & ITEM_CHUNKED) == 0) {
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
int tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
memmove(c->ritem, c->rcurr, tocopy);
c->ritem += tocopy;
c->rlbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
if (c->rlbytes == 0) {
break;
}
}
/* now try reading from the socket */
res = c->read(c, c->ritem, c->rlbytes);
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
if (c->rcurr == c->ritem) {
c->rcurr += res;
}
c->ritem += res;
c->rlbytes -= res;
break;
}
} else {
res = read_into_chunked_item(c);
if (res > 0)
break;
}
if (res == 0) { /* end of stream */
conn_set_state(c, conn_closing);
break;
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_READ | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
stop = true;
break;
}
/* Memory allocation failure */
if (res == -2) {
out_of_memory(c, "SERVER_ERROR Out of memory during read");
c->sbytes = c->rlbytes;
conn_set_state(c, conn_swallow);
// Ensure this flag gets cleared. It gets killed on conn_new()
// so any conn_closing is fine, calling complete_nread is
// fine. This swallow semms to be the only other case.
c->set_stale = false;
c->mset_res = false;
break;
}
/* otherwise we have a real error, on which we close the connection */
if (settings.verbose > 0) {
fprintf(stderr, "Failed to read, and not due to blocking:\n"
"errno: %d %s \n"
"rcurr=%lx ritem=%lx rbuf=%lx rlbytes=%d rsize=%d\n",
errno, strerror(errno),
(long)c->rcurr, (long)c->ritem, (long)c->rbuf,
(int)c->rlbytes, (int)c->rsize);
}
conn_set_state(c, conn_closing);
break;
case conn_swallow:
/* we are reading sbytes and throwing them away */
if (c->sbytes <= 0) {
conn_set_state(c, conn_new_cmd);
break;
}
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
int tocopy = c->rbytes > c->sbytes ? c->sbytes : c->rbytes;
c->sbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
break;
}
/* now try reading from the socket */
res = c->read(c, c->rbuf, c->rsize > c->sbytes ? c->sbytes : c->rsize);
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
c->sbytes -= res;
break;
}
if (res == 0) { /* end of stream */
conn_set_state(c, conn_closing);
break;
}
if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
if (!update_event(c, EV_READ | EV_PERSIST)) {
if (settings.verbose > 0)
fprintf(stderr, "Couldn't update event\n");
conn_set_state(c, conn_closing);
break;
}
stop = true;
break;
}
/* otherwise we have a real error, on which we close the connection */
if (settings.verbose > 0)
fprintf(stderr, "Failed to read, and not due to blocking\n");
conn_set_state(c, conn_closing);
break;
case conn_write:
case conn_mwrite:
#ifdef EXTSTORE
/* have side IO's that must process before transmit() can run.
* remove the connection from the worker thread and dispatch the
* IO queue
*/
if (c->io_wrapleft) {
assert(c->io_queued == false);
assert(c->io_wraplist != NULL);
// TODO: create proper state for this condition
conn_set_state(c, conn_watch);
event_del(&c->event);
c->io_queued = true;
extstore_submit(c->thread->storage, &c->io_wraplist->io);
stop = true;
break;
}
#endif
switch (!IS_UDP(c->transport) ? transmit(c) : transmit_udp(c)) {
case TRANSMIT_COMPLETE:
if (c->state == conn_mwrite) {
// Free up IO wraps and any half-uploaded items.
conn_release_items(c);
conn_set_state(c, conn_new_cmd);
if (c->close_after_write) {
conn_set_state(c, conn_closing);
}
} else {
if (settings.verbose > 0)
fprintf(stderr, "Unexpected state %d\n", c->state);
conn_set_state(c, conn_closing);
}
break;
case TRANSMIT_INCOMPLETE:
case TRANSMIT_HARD_ERROR:
break; /* Continue in state machine. */
case TRANSMIT_SOFT_ERROR:
stop = true;
break;
}
break;
case conn_closing:
if (IS_UDP(c->transport))
conn_cleanup(c);
else
conn_close(c);
stop = true;
break;
case conn_closed:
/* This only happens if dormando is an idiot. */
abort();
break;
case conn_watch:
/* We handed off our connection to the logger thread. */
stop = true;
break;
case conn_max_state:
assert(false);
break;
}
}
return;
}
void event_handler(const int fd, const short which, void *arg) {
conn *c;
c = (conn *)arg;
assert(c != NULL);
c->which = which;
/* sanity */
if (fd != c->sfd) {
if (settings.verbose > 0)
fprintf(stderr, "Catastrophic: event fd doesn't match conn fd!\n");
conn_close(c);
return;
}
drive_machine(c);
/* wait for next event */
return;
}
static int new_socket(struct addrinfo *ai) {
int sfd;
int flags;
if ((sfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol)) == -1) {
return -1;
}
if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
return -1;
}
return sfd;
}
/*
* Sets a socket's send buffer size to the maximum allowed by the system.
*/
static void maximize_sndbuf(const int sfd) {
socklen_t intsize = sizeof(int);
int last_good = 0;
int min, max, avg;
int old_size;
/* Start with the default size. */
if (getsockopt(sfd, SOL_SOCKET, SO_SNDBUF, &old_size, &intsize) != 0) {
if (settings.verbose > 0)
perror("getsockopt(SO_SNDBUF)");
return;
}
/* Binary-search for the real maximum. */
min = old_size;
max = MAX_SENDBUF_SIZE;
while (min <= max) {
avg = ((unsigned int)(min + max)) / 2;
if (setsockopt(sfd, SOL_SOCKET, SO_SNDBUF, (void *)&avg, intsize) == 0) {
last_good = avg;
min = avg + 1;
} else {
max = avg - 1;
}
}
if (settings.verbose > 1)
fprintf(stderr, "<%d send buffer was %d, now %d\n", sfd, old_size, last_good);
}
/**
* Create a socket and bind it to a specific port number
* @param interface the interface to bind to
* @param port the port number to bind to
* @param transport the transport protocol (TCP / UDP)
* @param portnumber_file A filepointer to write the port numbers to
* when they are successfully added to the list of ports we
* listen on.
*/
static int server_socket(const char *interface,
int port,
enum network_transport transport,
FILE *portnumber_file, bool ssl_enabled) {
int sfd;
struct linger ling = {0, 0};
struct addrinfo *ai;
struct addrinfo *next;
struct addrinfo hints = { .ai_flags = AI_PASSIVE,
.ai_family = AF_UNSPEC };
char port_buf[NI_MAXSERV];
int error;
int success = 0;
int flags =1;
hints.ai_socktype = IS_UDP(transport) ? SOCK_DGRAM : SOCK_STREAM;
if (port == -1) {
port = 0;
}
snprintf(port_buf, sizeof(port_buf), "%d", port);
error= getaddrinfo(interface, port_buf, &hints, &ai);
if (error != 0) {
if (error != EAI_SYSTEM)
fprintf(stderr, "getaddrinfo(): %s\n", gai_strerror(error));
else
perror("getaddrinfo()");
return 1;
}
for (next= ai; next; next= next->ai_next) {
conn *listen_conn_add;
if ((sfd = new_socket(next)) == -1) {
/* getaddrinfo can return "junk" addresses,
* we make sure at least one works before erroring.
*/
if (errno == EMFILE) {
/* ...unless we're out of fds */
perror("server_socket");
exit(EX_OSERR);
}
continue;
}
#ifdef IPV6_V6ONLY
if (next->ai_family == AF_INET6) {
error = setsockopt(sfd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &flags, sizeof(flags));
if (error != 0) {
perror("setsockopt");
close(sfd);
continue;
}
}
#endif
setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags));
if (IS_UDP(transport)) {
maximize_sndbuf(sfd);
} else {
error = setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags));
if (error != 0)
perror("setsockopt");
error = setsockopt(sfd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling));
if (error != 0)
perror("setsockopt");
error = setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (void *)&flags, sizeof(flags));
if (error != 0)
perror("setsockopt");
}
if (bind(sfd, next->ai_addr, next->ai_addrlen) == -1) {
if (errno != EADDRINUSE) {
perror("bind()");
close(sfd);
freeaddrinfo(ai);
return 1;
}
close(sfd);
continue;
} else {
success++;
if (!IS_UDP(transport) && listen(sfd, settings.backlog) == -1) {
perror("listen()");
close(sfd);
freeaddrinfo(ai);
return 1;
}
if (portnumber_file != NULL &&
(next->ai_addr->sa_family == AF_INET ||
next->ai_addr->sa_family == AF_INET6)) {
union {
struct sockaddr_in in;
struct sockaddr_in6 in6;
} my_sockaddr;
socklen_t len = sizeof(my_sockaddr);
if (getsockname(sfd, (struct sockaddr*)&my_sockaddr, &len)==0) {
if (next->ai_addr->sa_family == AF_INET) {
fprintf(portnumber_file, "%s INET: %u\n",
IS_UDP(transport) ? "UDP" : "TCP",
ntohs(my_sockaddr.in.sin_port));
} else {
fprintf(portnumber_file, "%s INET6: %u\n",
IS_UDP(transport) ? "UDP" : "TCP",
ntohs(my_sockaddr.in6.sin6_port));
}
}
}
}
if (IS_UDP(transport)) {
int c;
for (c = 0; c < settings.num_threads_per_udp; c++) {
/* Allocate one UDP file descriptor per worker thread;
* this allows "stats conns" to separately list multiple
* parallel UDP requests in progress.
*
* The dispatch code round-robins new connection requests
* among threads, so this is guaranteed to assign one
* FD to each thread.
*/
int per_thread_fd;
if (c == 0) {
per_thread_fd = sfd;
} else {
per_thread_fd = dup(sfd);
if (per_thread_fd < 0) {
perror("Failed to duplicate file descriptor");
exit(EXIT_FAILURE);
}
}
dispatch_conn_new(per_thread_fd, conn_read,
EV_READ | EV_PERSIST,
UDP_READ_BUFFER_SIZE, transport, NULL);
}
} else {
if (!(listen_conn_add = conn_new(sfd, conn_listening,
EV_READ | EV_PERSIST, 1,
transport, main_base, NULL))) {
fprintf(stderr, "failed to create listening connection\n");
exit(EXIT_FAILURE);
}
#ifdef TLS
listen_conn_add->ssl_enabled = ssl_enabled;
#else
assert(ssl_enabled == false);
#endif
listen_conn_add->next = listen_conn;
listen_conn = listen_conn_add;
}
}
freeaddrinfo(ai);
/* Return zero iff we detected no errors in starting up connections */
return success == 0;
}
static int server_sockets(int port, enum network_transport transport,
FILE *portnumber_file) {
bool ssl_enabled = false;
#ifdef TLS
const char *notls = "notls";
ssl_enabled = settings.ssl_enabled;
#endif
if (settings.inter == NULL) {
return server_socket(settings.inter, port, transport, portnumber_file, ssl_enabled);
} else {
// tokenize them and bind to each one of them..
char *b;
int ret = 0;
char *list = strdup(settings.inter);
if (list == NULL) {
fprintf(stderr, "Failed to allocate memory for parsing server interface string\n");
return 1;
}
for (char *p = strtok_r(list, ";,", &b);
p != NULL;
p = strtok_r(NULL, ";,", &b)) {
int the_port = port;
#ifdef TLS
ssl_enabled = settings.ssl_enabled;
// "notls" option is valid only when memcached is run with SSL enabled.
if (strncmp(p, notls, strlen(notls)) == 0) {
if (!settings.ssl_enabled) {
fprintf(stderr, "'notls' option is valid only when SSL is enabled\n");
free(list);
return 1;
}
ssl_enabled = false;
p += strlen(notls) + 1;
}
#endif
char *h = NULL;
if (*p == '[') {
// expecting it to be an IPv6 address enclosed in []
// i.e. RFC3986 style recommended by RFC5952
char *e = strchr(p, ']');
if (e == NULL) {
fprintf(stderr, "Invalid IPV6 address: \"%s\"", p);
free(list);
return 1;
}
h = ++p; // skip the opening '['
*e = '\0';
p = ++e; // skip the closing ']'
}
char *s = strchr(p, ':');
if (s != NULL) {
// If no more semicolons - attempt to treat as port number.
// Otherwise the only valid option is an unenclosed IPv6 without port, until
// of course there was an RFC3986 IPv6 address previously specified -
// in such a case there is no good option, will just send it to fail as port number.
if (strchr(s + 1, ':') == NULL || h != NULL) {
*s = '\0';
++s;
if (!safe_strtol(s, &the_port)) {
fprintf(stderr, "Invalid port number: \"%s\"", s);
free(list);
return 1;
}
}
}
if (h != NULL)
p = h;
if (strcmp(p, "*") == 0) {
p = NULL;
}
ret |= server_socket(p, the_port, transport, portnumber_file, ssl_enabled);
}
free(list);
return ret;
}
}
static int new_socket_unix(void) {
int sfd;
int flags;
if ((sfd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
perror("socket()");
return -1;
}
if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
return -1;
}
return sfd;
}
static int server_socket_unix(const char *path, int access_mask) {
int sfd;
struct linger ling = {0, 0};
struct sockaddr_un addr;
struct stat tstat;
int flags =1;
int old_umask;
if (!path) {
return 1;
}
if ((sfd = new_socket_unix()) == -1) {
return 1;
}
/*
* Clean up a previous socket file if we left it around
*/
if (lstat(path, &tstat) == 0) {
if (S_ISSOCK(tstat.st_mode))
unlink(path);
}
setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags));
setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags));
setsockopt(sfd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling));
/*
* the memset call clears nonstandard fields in some implementations
* that otherwise mess things up.
*/
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
assert(strcmp(addr.sun_path, path) == 0);
old_umask = umask( ~(access_mask&0777));
if (bind(sfd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
perror("bind()");
close(sfd);
umask(old_umask);
return 1;
}
umask(old_umask);
if (listen(sfd, settings.backlog) == -1) {
perror("listen()");
close(sfd);
return 1;
}
if (!(listen_conn = conn_new(sfd, conn_listening,
EV_READ | EV_PERSIST, 1,
local_transport, main_base, NULL))) {
fprintf(stderr, "failed to create listening connection\n");
exit(EXIT_FAILURE);
}
return 0;
}
/*
* We keep the current time of day in a global variable that's updated by a
* timer event. This saves us a bunch of time() system calls (we really only
* need to get the time once a second, whereas there can be tens of thousands
* of requests a second) and allows us to use server-start-relative timestamps
* rather than absolute UNIX timestamps, a space savings on systems where
* sizeof(time_t) > sizeof(unsigned int).
*/
volatile rel_time_t current_time;
static struct event clockevent;
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
static bool monotonic = false;
static int64_t monotonic_start;
#endif
/* libevent uses a monotonic clock when available for event scheduling. Aside
* from jitter, simply ticking our internal timer here is accurate enough.
* Note that users who are setting explicit dates for expiration times *must*
* ensure their clocks are correct before starting memcached. */
static void clock_handler(const int fd, const short which, void *arg) {
struct timeval t = {.tv_sec = 1, .tv_usec = 0};
static bool initialized = false;
if (initialized) {
/* only delete the event if it's actually there. */
evtimer_del(&clockevent);
} else {
initialized = true;
}
// While we're here, check for hash table expansion.
// This function should be quick to avoid delaying the timer.
assoc_start_expand(stats_state.curr_items);
// also, if HUP'ed we need to do some maintenance.
// for now that's just the authfile reload.
if (settings.sig_hup) {
settings.sig_hup = false;
authfile_load(settings.auth_file);
}
evtimer_set(&clockevent, clock_handler, 0);
event_base_set(main_base, &clockevent);
evtimer_add(&clockevent, &t);
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
if (monotonic) {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
return;
current_time = (rel_time_t) (ts.tv_sec - monotonic_start);
return;
}
#endif
{
struct timeval tv;
gettimeofday(&tv, NULL);
current_time = (rel_time_t) (tv.tv_sec - process_started);
}
}
static const char* flag_enabled_disabled(bool flag) {
return (flag ? "enabled" : "disabled");
}
static void verify_default(const char* param, bool condition) {
if (!condition) {
printf("Default value of [%s] has changed."
" Modify the help text and default value check.\n", param);
exit(EXIT_FAILURE);
}
}
static void usage(void) {
printf(PACKAGE " " VERSION "\n");
printf("-p, --port=<num> TCP port to listen on (default: %d)\n"
"-U, --udp-port=<num> UDP port to listen on (default: %d, off)\n"
"-s, --unix-socket=<file> UNIX socket to listen on (disables network support)\n"
"-A, --enable-shutdown enable ascii \"shutdown\" command\n"
"-a, --unix-mask=<mask> access mask for UNIX socket, in octal (default: %o)\n"
"-l, --listen=<addr> interface to listen on (default: INADDR_ANY)\n"
#ifdef TLS
" if TLS/SSL is enabled, 'notls' prefix can be used to\n"
" disable for specific listeners (-l notls:<ip>:<port>) \n"
#endif
"-d, --daemon run as a daemon\n"
"-r, --enable-coredumps maximize core file limit\n"
"-u, --user=<user> assume identity of <username> (only when run as root)\n"
"-m, --memory-limit=<num> item memory in megabytes (default: %lu)\n"
"-M, --disable-evictions return error on memory exhausted instead of evicting\n"
"-c, --conn-limit=<num> max simultaneous connections (default: %d)\n"
"-k, --lock-memory lock down all paged memory\n"
"-v, --verbose verbose (print errors/warnings while in event loop)\n"
"-vv very verbose (also print client commands/responses)\n"
"-vvv extremely verbose (internal state transitions)\n"
"-h, --help print this help and exit\n"
"-i, --license print memcached and libevent license\n"
"-V, --version print version and exit\n"
"-P, --pidfile=<file> save PID in <file>, only used with -d option\n"
"-f, --slab-growth-factor=<num> chunk size growth factor (default: %2.2f)\n"
"-n, --slab-min-size=<bytes> min space used for key+value+flags (default: %d)\n",
settings.port, settings.udpport, settings.access, (unsigned long) settings.maxbytes / (1 << 20),
settings.maxconns, settings.factor, settings.chunk_size);
verify_default("udp-port",settings.udpport == 0);
printf("-L, --enable-largepages try to use large memory pages (if available)\n");
printf("-D <char> Use <char> as the delimiter between key prefixes and IDs.\n"
" This is used for per-prefix stats reporting. The default is\n"
" \"%c\" (colon). If this option is specified, stats collection\n"
" is turned on automatically; if not, then it may be turned on\n"
" by sending the \"stats detail on\" command to the server.\n",
settings.prefix_delimiter);
printf("-t, --threads=<num> number of threads to use (default: %d)\n", settings.num_threads);
printf("-R, --max-reqs-per-event maximum number of requests per event, limits the\n"
" requests processed per connection to prevent \n"
" starvation (default: %d)\n", settings.reqs_per_event);
printf("-C, --disable-cas disable use of CAS\n");
printf("-b, --listen-backlog=<num> set the backlog queue limit (default: %d)\n", settings.backlog);
printf("-B, --protocol=<name> protocol - one of ascii, binary, or auto (default: %s)\n",
prot_text(settings.binding_protocol));
printf("-I, --max-item-size=<num> adjusts max item size\n"
" (default: %dm, min: %dk, max: %dm)\n",
settings.item_size_max/ (1 << 20), ITEM_SIZE_MAX_LOWER_LIMIT / (1 << 10), ITEM_SIZE_MAX_UPPER_LIMIT / (1 << 20));
#ifdef ENABLE_SASL
printf("-S, --enable-sasl turn on Sasl authentication\n");
#endif
printf("-F, --disable-flush-all disable flush_all command\n");
printf("-X, --disable-dumping disable stats cachedump and lru_crawler metadump\n");
printf("-W --disable-watch disable watch commands (live logging)\n");
printf("-Y, --auth-file=<file> (EXPERIMENTAL) enable ASCII protocol authentication. format:\n"
" user:pass\\nuser2:pass2\\n\n");
printf("-e, --memory-file=<file> (EXPERIMENTAL) mmap a file for item memory.\n"
" use only in ram disks or persistent memory mounts!\n"
" enables restartable cache (stop with SIGUSR1)\n");
#ifdef TLS
printf("-Z, --enable-ssl enable TLS/SSL\n");
#endif
printf("-o, --extended comma separated list of extended options\n"
" most options have a 'no_' prefix to disable\n"
" - maxconns_fast: immediately close new connections after limit (default: %s)\n"
" - hashpower: an integer multiplier for how large the hash\n"
" table should be. normally grows at runtime. (default starts at: %d)\n"
" set based on \"STAT hash_power_level\"\n"
" - tail_repair_time: time in seconds for how long to wait before\n"
" forcefully killing LRU tail item.\n"
" disabled by default; very dangerous option.\n"
" - hash_algorithm: the hash table algorithm\n"
" default is murmur3 hash. options: jenkins, murmur3\n"
" - no_lru_crawler: disable LRU Crawler background thread.\n"
" - lru_crawler_sleep: microseconds to sleep between items\n"
" default is %d.\n"
" - lru_crawler_tocrawl: max items to crawl per slab per run\n"
" default is %u (unlimited)\n",
flag_enabled_disabled(settings.maxconns_fast), settings.hashpower_init,
settings.lru_crawler_sleep, settings.lru_crawler_tocrawl);
printf(" - resp_obj_mem_limit: limit in megabytes for connection response objects.\n"
" do not adjust unless you have high (100k+) conn. limits.\n"
" 0 means unlimited (default: %u)\n"
" - read_buf_mem_limit: limit in megabytes for connection read buffers.\n"
" do not adjust unless you have high (100k+) conn. limits.\n"
" 0 means unlimited (default: %u)\n",
settings.resp_obj_mem_limit,
settings.read_buf_mem_limit);
verify_default("resp_obj_mem_limit", settings.resp_obj_mem_limit == 0);
verify_default("read_buf_mem_limit", settings.read_buf_mem_limit == 0);
printf(" - no_lru_maintainer: disable new LRU system + background thread.\n"
" - hot_lru_pct: pct of slab memory to reserve for hot lru.\n"
" (requires lru_maintainer, default pct: %d)\n"
" - warm_lru_pct: pct of slab memory to reserve for warm lru.\n"
" (requires lru_maintainer, default pct: %d)\n"
" - hot_max_factor: items idle > cold lru age * drop from hot lru. (default: %.2f)\n"
" - warm_max_factor: items idle > cold lru age * this drop from warm. (default: %.2f)\n"
" - temporary_ttl: TTL's below get separate LRU, can't be evicted.\n"
" (requires lru_maintainer, default: %d)\n"
" - idle_timeout: timeout for idle connections. (default: %d, no timeout)\n",
settings.hot_lru_pct, settings.warm_lru_pct, settings.hot_max_factor, settings.warm_max_factor,
settings.temporary_ttl, settings.idle_timeout);
printf(" - slab_chunk_max: (EXPERIMENTAL) maximum slab size in kilobytes. use extreme care. (default: %d)\n"
" - watcher_logbuf_size: size in kilobytes of per-watcher write buffer. (default: %u)\n"
" - worker_logbuf_size: size in kilobytes of per-worker-thread buffer\n"
" read by background thread, then written to watchers. (default: %u)\n"
" - track_sizes: enable dynamic reports for 'stats sizes' command.\n"
" - no_hashexpand: disables hash table expansion (dangerous)\n"
" - modern: enables options which will be default in future.\n"
" currently: nothing\n"
" - no_modern: uses defaults of previous major version (1.4.x)\n",
settings.slab_chunk_size_max / (1 << 10), settings.logger_watcher_buf_size / (1 << 10),
settings.logger_buf_size / (1 << 10));
verify_default("tail_repair_time", settings.tail_repair_time == TAIL_REPAIR_TIME_DEFAULT);
verify_default("lru_crawler_tocrawl", settings.lru_crawler_tocrawl == 0);
verify_default("idle_timeout", settings.idle_timeout == 0);
#ifdef HAVE_DROP_PRIVILEGES
printf(" - drop_privileges: enable dropping extra syscall privileges\n"
" - no_drop_privileges: disable drop_privileges in case it causes issues with\n"
" some customisation.\n"
" (default is no_drop_privileges)\n");
verify_default("drop_privileges", !settings.drop_privileges);
#ifdef MEMCACHED_DEBUG
printf(" - relaxed_privileges: running tests requires extra privileges. (default: %s)\n",
flag_enabled_disabled(settings.relaxed_privileges));
#endif
#endif
#ifdef EXTSTORE
printf("\n - External storage (ext_*) related options (see: https://memcached.org/extstore)\n");
printf(" - ext_path: file to write to for external storage.\n"
" ie: ext_path=/mnt/d1/extstore:1G\n"
" - ext_page_size: size in megabytes of storage pages. (default: %u)\n"
" - ext_wbuf_size: size in megabytes of page write buffers. (default: %u)\n"
" - ext_threads: number of IO threads to run. (default: %u)\n"
" - ext_item_size: store items larger than this (bytes, default %u)\n"
" - ext_item_age: store items idle at least this long (seconds, default: no age limit)\n"
" - ext_low_ttl: consider TTLs lower than this specially (default: %u)\n"
" - ext_drop_unread: don't re-write unread values during compaction (default: %s)\n"
" - ext_recache_rate: recache an item every N accesses (default: %u)\n"
" - ext_compact_under: compact when fewer than this many free pages\n"
" (default: 1/4th of the assigned storage)\n"
" - ext_drop_under: drop COLD items when fewer than this many free pages\n"
" (default: 1/4th of the assigned storage)\n"
" - ext_max_frag: max page fragmentation to tolerate (default: %.2f)\n"
" - slab_automove_freeratio: ratio of memory to hold free as buffer.\n"
" (see doc/storage.txt for more info, default: %.3f)\n",
settings.ext_page_size / (1 << 20), settings.ext_wbuf_size / (1 << 20), settings.ext_io_threadcount,
settings.ext_item_size, settings.ext_low_ttl,
flag_enabled_disabled(settings.ext_drop_unread), settings.ext_recache_rate,
settings.ext_max_frag, settings.slab_automove_freeratio);
verify_default("ext_item_age", settings.ext_item_age == UINT_MAX);
#endif
#ifdef TLS
printf(" - ssl_chain_cert: certificate chain file in PEM format\n"
" - ssl_key: private key, if not part of the -ssl_chain_cert\n"
" - ssl_keyformat: private key format (PEM, DER or ENGINE) (default: PEM)\n");
printf(" - ssl_verify_mode: peer certificate verification mode, default is 0(None).\n"
" valid values are 0(None), 1(Request), 2(Require)\n"
" or 3(Once)\n");
printf(" - ssl_ciphers: specify cipher list to be used\n"
" - ssl_ca_cert: PEM format file of acceptable client CA's\n"
" - ssl_wbuf_size: size in kilobytes of per-connection SSL output buffer\n"
" (default: %u)\n", settings.ssl_wbuf_size / (1 << 10));
verify_default("ssl_keyformat", settings.ssl_keyformat == SSL_FILETYPE_PEM);
verify_default("ssl_verify_mode", settings.ssl_verify_mode == SSL_VERIFY_NONE);
#endif
return;
}
static void usage_license(void) {
printf(PACKAGE " " VERSION "\n\n");
printf(
"Copyright (c) 2003, Danga Interactive, Inc. <http://www.danga.com/>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are\n"
"met:\n"
"\n"
" * Redistributions of source code must retain the above copyright\n"
"notice, this list of conditions and the following disclaimer.\n"
"\n"
" * Redistributions in binary form must reproduce the above\n"
"copyright notice, this list of conditions and the following disclaimer\n"
"in the documentation and/or other materials provided with the\n"
"distribution.\n"
"\n"
" * Neither the name of the Danga Interactive nor the names of its\n"
"contributors may be used to endorse or promote products derived from\n"
"this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n"
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n"
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n"
"OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n"
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n"
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n"
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
"\n"
"\n"
"This product includes software developed by Niels Provos.\n"
"\n"
"[ libevent ]\n"
"\n"
"Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions\n"
"are met:\n"
"1. Redistributions of source code must retain the above copyright\n"
" notice, this list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright\n"
" notice, this list of conditions and the following disclaimer in the\n"
" documentation and/or other materials provided with the distribution.\n"
"3. All advertising materials mentioning features or use of this software\n"
" must display the following acknowledgement:\n"
" This product includes software developed by Niels Provos.\n"
"4. The name of the author may not be used to endorse or promote products\n"
" derived from this software without specific prior written permission.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n"
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n"
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n"
"IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n"
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n"
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n"
"THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
);
return;
}
static void save_pid(const char *pid_file) {
FILE *fp;
if (access(pid_file, F_OK) == 0) {
if ((fp = fopen(pid_file, "r")) != NULL) {
char buffer[1024];
if (fgets(buffer, sizeof(buffer), fp) != NULL) {
unsigned int pid;
if (safe_strtoul(buffer, &pid) && kill((pid_t)pid, 0) == 0) {
fprintf(stderr, "WARNING: The pid file contained the following (running) pid: %u\n", pid);
}
}
fclose(fp);
}
}
/* Create the pid file first with a temporary name, then
* atomically move the file to the real name to avoid a race with
* another process opening the file to read the pid, but finding
* it empty.
*/
char tmp_pid_file[1024];
snprintf(tmp_pid_file, sizeof(tmp_pid_file), "%s.tmp", pid_file);
if ((fp = fopen(tmp_pid_file, "w")) == NULL) {
vperror("Could not open the pid file %s for writing", tmp_pid_file);
return;
}
fprintf(fp,"%ld\n", (long)getpid());
if (fclose(fp) == -1) {
vperror("Could not close the pid file %s", tmp_pid_file);
}
if (rename(tmp_pid_file, pid_file) != 0) {
vperror("Could not rename the pid file from %s to %s",
tmp_pid_file, pid_file);
}
}
static void remove_pidfile(const char *pid_file) {
if (pid_file == NULL)
return;
if (unlink(pid_file) != 0) {
vperror("Could not remove the pid file %s", pid_file);
}
}
static void sig_handler(const int sig) {
printf("Signal handled: %s.\n", strsignal(sig));
exit(EXIT_SUCCESS);
}
static void sighup_handler(const int sig) {
settings.sig_hup = true;
}
static void sig_usrhandler(const int sig) {
printf("Graceful shutdown signal handled: %s.\n", strsignal(sig));
stop_main_loop = true;
}
#ifndef HAVE_SIGIGNORE
static int sigignore(int sig) {
struct sigaction sa = { .sa_handler = SIG_IGN, .sa_flags = 0 };
if (sigemptyset(&sa.sa_mask) == -1 || sigaction(sig, &sa, 0) == -1) {
return -1;
}
return 0;
}
#endif
/*
* On systems that supports multiple page sizes we may reduce the
* number of TLB-misses by using the biggest available page size
*/
static int enable_large_pages(void) {
#if defined(HAVE_GETPAGESIZES) && defined(HAVE_MEMCNTL)
int ret = -1;
size_t sizes[32];
int avail = getpagesizes(sizes, 32);
if (avail != -1) {
size_t max = sizes[0];
struct memcntl_mha arg = {0};
int ii;
for (ii = 1; ii < avail; ++ii) {
if (max < sizes[ii]) {
max = sizes[ii];
}
}
arg.mha_flags = 0;
arg.mha_pagesize = max;
arg.mha_cmd = MHA_MAPSIZE_BSSBRK;
if (memcntl(0, 0, MC_HAT_ADVISE, (caddr_t)&arg, 0, 0) == -1) {
fprintf(stderr, "Failed to set large pages: %s\n",
strerror(errno));
fprintf(stderr, "Will use default page size\n");
} else {
ret = 0;
}
} else {
fprintf(stderr, "Failed to get supported pagesizes: %s\n",
strerror(errno));
fprintf(stderr, "Will use default page size\n");
}
return ret;
#elif defined(__linux__) && defined(MADV_HUGEPAGE)
/* check if transparent hugepages is compiled into the kernel */
struct stat st;
int ret = stat("/sys/kernel/mm/transparent_hugepage/enabled", &st);
if (ret || !(st.st_mode & S_IFREG)) {
fprintf(stderr, "Transparent huge pages support not detected.\n");
fprintf(stderr, "Will use default page size.\n");
return -1;
}
return 0;
#elif defined(__FreeBSD__)
int spages;
size_t spagesl = sizeof(spages);
if (sysctlbyname("vm.pmap.pg_ps_enabled", &spages,
&spagesl, NULL, 0) != 0) {
fprintf(stderr, "Could not evaluate the presence of superpages features.");
return -1;
}
if (spages != 1) {
fprintf(stderr, "Superpages support not detected.\n");
fprintf(stderr, "Will use default page size.\n");
return -1;
}
return 0;
#else
return -1;
#endif
}
/**
* Do basic sanity check of the runtime environment
* @return true if no errors found, false if we can't use this env
*/
static bool sanitycheck(void) {
/* One of our biggest problems is old and bogus libevents */
const char *ever = event_get_version();
if (ever != NULL) {
if (strncmp(ever, "1.", 2) == 0) {
/* Require at least 1.3 (that's still a couple of years old) */
if (('0' <= ever[2] && ever[2] < '3') && !isdigit(ever[3])) {
fprintf(stderr, "You are using libevent %s.\nPlease upgrade to"
" a more recent version (1.3 or newer)\n",
event_get_version());
return false;
}
}
}
return true;
}
static bool _parse_slab_sizes(char *s, uint32_t *slab_sizes) {
char *b = NULL;
uint32_t size = 0;
int i = 0;
uint32_t last_size = 0;
if (strlen(s) < 1)
return false;
for (char *p = strtok_r(s, "-", &b);
p != NULL;
p = strtok_r(NULL, "-", &b)) {
if (!safe_strtoul(p, &size) || size < settings.chunk_size
|| size > settings.slab_chunk_size_max) {
fprintf(stderr, "slab size %u is out of valid range\n", size);
return false;
}
if (last_size >= size) {
fprintf(stderr, "slab size %u cannot be lower than or equal to a previous class size\n", size);
return false;
}
if (size <= last_size + CHUNK_ALIGN_BYTES) {
fprintf(stderr, "slab size %u must be at least %d bytes larger than previous class\n",
size, CHUNK_ALIGN_BYTES);
return false;
}
slab_sizes[i++] = size;
last_size = size;
if (i >= MAX_NUMBER_OF_SLAB_CLASSES-1) {
fprintf(stderr, "too many slab classes specified\n");
return false;
}
}
slab_sizes[i] = 0;
return true;
}
struct _mc_meta_data {
void *mmap_base;
uint64_t old_base;
char *slab_config; // string containing either factor or custom slab list.
int64_t time_delta;
uint64_t process_started;
uint32_t current_time;
};
// We need to remember a combination of configuration settings and global
// state for restart viability and resumption of internal services.
// Compared to the number of tunables and state values, relatively little
// does need to be remembered.
// Time is the hardest; we have to assume the sys clock is correct and re-sync for
// the lost time after restart.
static int _mc_meta_save_cb(const char *tag, void *ctx, void *data) {
struct _mc_meta_data *meta = (struct _mc_meta_data *)data;
// Settings to remember.
// TODO: should get a version of version which is numeric, else
// comparisons for compat reasons are difficult.
// it may be possible to punt on this for now; since we can test for the
// absense of another key... such as the new numeric version.
//restart_set_kv(ctx, "version", "%s", VERSION);
// We hold the original factor or subopts _string_
// it can be directly compared without roundtripping through floats or
// serializing/deserializing the long options list.
restart_set_kv(ctx, "slab_config", "%s", meta->slab_config);
restart_set_kv(ctx, "maxbytes", "%llu", (unsigned long long) settings.maxbytes);
restart_set_kv(ctx, "chunk_size", "%d", settings.chunk_size);
restart_set_kv(ctx, "item_size_max", "%d", settings.item_size_max);
restart_set_kv(ctx, "slab_chunk_size_max", "%d", settings.slab_chunk_size_max);
restart_set_kv(ctx, "slab_page_size", "%d", settings.slab_page_size);
restart_set_kv(ctx, "use_cas", "%s", settings.use_cas ? "true" : "false");
restart_set_kv(ctx, "slab_reassign", "%s", settings.slab_reassign ? "true" : "false");
// Online state to remember.
// current time is tough. we need to rely on the clock being correct to
// pull the delta between stop and start times. we also need to know the
// delta between start time and now to restore monotonic clocks.
// for non-monotonic clocks (some OS?), process_started is the only
// important one.
restart_set_kv(ctx, "current_time", "%u", current_time);
// types are great until... this. some systems time_t could be big, but
// I'm assuming never negative.
restart_set_kv(ctx, "process_started", "%llu", (unsigned long long) process_started);
{
struct timeval tv;
gettimeofday(&tv, NULL);
restart_set_kv(ctx, "stop_time", "%lu", tv.tv_sec);
}
// Might as well just fetch the next CAS value to use than tightly
// coupling the internal variable into the restart system.
restart_set_kv(ctx, "current_cas", "%llu", (unsigned long long) get_cas_id());
restart_set_kv(ctx, "oldest_cas", "%llu", (unsigned long long) settings.oldest_cas);
restart_set_kv(ctx, "logger_gid", "%llu", logger_get_gid());
restart_set_kv(ctx, "hashpower", "%u", stats_state.hash_power_level);
// NOTE: oldest_live is a rel_time_t, which aliases for unsigned int.
// should future proof this with a 64bit upcast, or fetch value from a
// converter function/macro?
restart_set_kv(ctx, "oldest_live", "%u", settings.oldest_live);
// TODO: use uintptr_t etc? is it portable enough?
restart_set_kv(ctx, "mmap_oldbase", "%p", meta->mmap_base);
return 0;
}
// We must see at least this number of checked lines. Else empty/missing lines
// could cause a false-positive.
// TODO: Once crc32'ing of the metadata file is done this could be ensured better by
// the restart module itself (crc32 + count of lines must match on the
// backend)
#define RESTART_REQUIRED_META 17
// With this callback we make a decision on if the current configuration
// matches up enough to allow reusing the cache.
// We also re-load important runtime information.
static int _mc_meta_load_cb(const char *tag, void *ctx, void *data) {
struct _mc_meta_data *meta = (struct _mc_meta_data *)data;
char *key;
char *val;
int reuse_mmap = 0;
meta->process_started = 0;
meta->time_delta = 0;
meta->current_time = 0;
int lines_seen = 0;
// TODO: not sure this is any better than just doing an if/else tree with
// strcmp's...
enum {
R_MMAP_OLDBASE = 0,
R_MAXBYTES,
R_CHUNK_SIZE,
R_ITEM_SIZE_MAX,
R_SLAB_CHUNK_SIZE_MAX,
R_SLAB_PAGE_SIZE,
R_SLAB_CONFIG,
R_USE_CAS,
R_SLAB_REASSIGN,
R_CURRENT_CAS,
R_OLDEST_CAS,
R_OLDEST_LIVE,
R_LOGGER_GID,
R_CURRENT_TIME,
R_STOP_TIME,
R_PROCESS_STARTED,
R_HASHPOWER,
};
const char *opts[] = {
[R_MMAP_OLDBASE] = "mmap_oldbase",
[R_MAXBYTES] = "maxbytes",
[R_CHUNK_SIZE] = "chunk_size",
[R_ITEM_SIZE_MAX] = "item_size_max",
[R_SLAB_CHUNK_SIZE_MAX] = "slab_chunk_size_max",
[R_SLAB_PAGE_SIZE] = "slab_page_size",
[R_SLAB_CONFIG] = "slab_config",
[R_USE_CAS] = "use_cas",
[R_SLAB_REASSIGN] = "slab_reassign",
[R_CURRENT_CAS] = "current_cas",
[R_OLDEST_CAS] = "oldest_cas",
[R_OLDEST_LIVE] = "oldest_live",
[R_LOGGER_GID] = "logger_gid",
[R_CURRENT_TIME] = "current_time",
[R_STOP_TIME] = "stop_time",
[R_PROCESS_STARTED] = "process_started",
[R_HASHPOWER] = "hashpower",
NULL
};
while (restart_get_kv(ctx, &key, &val) == RESTART_OK) {
int type = 0;
int32_t val_int = 0;
uint32_t val_uint = 0;
int64_t bigval_int = 0;
uint64_t bigval_uint = 0;
while (opts[type] != NULL && strcmp(key, opts[type]) != 0) {
type++;
}
if (opts[type] == NULL) {
fprintf(stderr, "[restart] unknown/unhandled key: %s\n", key);
continue;
}
lines_seen++;
// helper for any boolean checkers.
bool val_bool = false;
bool is_bool = true;
if (strcmp(val, "false") == 0) {
val_bool = false;
} else if (strcmp(val, "true") == 0) {
val_bool = true;
} else {
is_bool = false;
}
switch (type) {
case R_MMAP_OLDBASE:
if (!safe_strtoull_hex(val, &meta->old_base)) {
fprintf(stderr, "[restart] failed to parse %s: %s\n", key, val);
reuse_mmap = -1;
}
break;
case R_MAXBYTES:
if (!safe_strtoll(val, &bigval_int) || settings.maxbytes != bigval_int) {
reuse_mmap = -1;
}
break;
case R_CHUNK_SIZE:
if (!safe_strtol(val, &val_int) || settings.chunk_size != val_int) {
reuse_mmap = -1;
}
break;
case R_ITEM_SIZE_MAX:
if (!safe_strtol(val, &val_int) || settings.item_size_max != val_int) {
reuse_mmap = -1;
}
break;
case R_SLAB_CHUNK_SIZE_MAX:
if (!safe_strtol(val, &val_int) || settings.slab_chunk_size_max != val_int) {
reuse_mmap = -1;
}
break;
case R_SLAB_PAGE_SIZE:
if (!safe_strtol(val, &val_int) || settings.slab_page_size != val_int) {
reuse_mmap = -1;
}
break;
case R_SLAB_CONFIG:
if (strcmp(val, meta->slab_config) != 0) {
reuse_mmap = -1;
}
break;
case R_USE_CAS:
if (!is_bool || settings.use_cas != val_bool) {
reuse_mmap = -1;
}
break;
case R_SLAB_REASSIGN:
if (!is_bool || settings.slab_reassign != val_bool) {
reuse_mmap = -1;
}
break;
case R_CURRENT_CAS:
// FIXME: do we need to fail if these values _aren't_ found?
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
set_cas_id(bigval_uint);
}
break;
case R_OLDEST_CAS:
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
settings.oldest_cas = bigval_uint;
}
break;
case R_OLDEST_LIVE:
if (!safe_strtoul(val, &val_uint)) {
reuse_mmap = -1;
} else {
settings.oldest_live = val_uint;
}
break;
case R_LOGGER_GID:
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
logger_set_gid(bigval_uint);
}
break;
case R_PROCESS_STARTED:
if (!safe_strtoull(val, &bigval_uint)) {
reuse_mmap = -1;
} else {
meta->process_started = bigval_uint;
}
break;
case R_CURRENT_TIME:
if (!safe_strtoul(val, &val_uint)) {
reuse_mmap = -1;
} else {
meta->current_time = val_uint;
}
break;
case R_STOP_TIME:
if (!safe_strtoll(val, &bigval_int)) {
reuse_mmap = -1;
} else {
struct timeval t;
gettimeofday(&t, NULL);
meta->time_delta = t.tv_sec - bigval_int;
// clock has done something crazy.
// there are _lots_ of ways the clock can go wrong here, but
// this is a safe sanity check since there's nothing else we
// can realistically do.
if (meta->time_delta <= 0) {
reuse_mmap = -1;
}
}
break;
case R_HASHPOWER:
if (!safe_strtoul(val, &val_uint)) {
reuse_mmap = -1;
} else {
settings.hashpower_init = val_uint;
}
break;
default:
fprintf(stderr, "[restart] unhandled key: %s\n", key);
}
if (reuse_mmap != 0) {
fprintf(stderr, "[restart] restart incompatible due to setting for [%s] [old value: %s]\n", key, val);
break;
}
}
if (lines_seen < RESTART_REQUIRED_META) {
fprintf(stderr, "[restart] missing some metadata lines\n");
reuse_mmap = -1;
}
return reuse_mmap;
}
int main (int argc, char **argv) {
int c;
bool lock_memory = false;
bool do_daemonize = false;
bool preallocate = false;
int maxcore = 0;
char *username = NULL;
char *pid_file = NULL;
char *memory_file = NULL;
struct passwd *pw;
struct rlimit rlim;
char *buf;
char unit = '\0';
int size_max = 0;
int retval = EXIT_SUCCESS;
bool protocol_specified = false;
bool tcp_specified = false;
bool udp_specified = false;
bool start_lru_maintainer = true;
bool start_lru_crawler = true;
bool start_assoc_maint = true;
enum hashfunc_type hash_type = MURMUR3_HASH;
uint32_t tocrawl;
uint32_t slab_sizes[MAX_NUMBER_OF_SLAB_CLASSES];
bool use_slab_sizes = false;
char *slab_sizes_unparsed = NULL;
bool slab_chunk_size_changed = false;
// struct for restart code. Initialized up here so we can curry
// important settings to save or validate.
struct _mc_meta_data *meta = malloc(sizeof(struct _mc_meta_data));
meta->slab_config = NULL;
#ifdef EXTSTORE
void *storage = NULL;
struct extstore_conf_file *storage_file = NULL;
struct extstore_conf ext_cf;
#endif
char *subopts, *subopts_orig;
char *subopts_value;
enum {
MAXCONNS_FAST = 0,
HASHPOWER_INIT,
NO_HASHEXPAND,
SLAB_REASSIGN,
SLAB_AUTOMOVE,
SLAB_AUTOMOVE_RATIO,
SLAB_AUTOMOVE_WINDOW,
TAIL_REPAIR_TIME,
HASH_ALGORITHM,
LRU_CRAWLER,
LRU_CRAWLER_SLEEP,
LRU_CRAWLER_TOCRAWL,
LRU_MAINTAINER,
HOT_LRU_PCT,
WARM_LRU_PCT,
HOT_MAX_FACTOR,
WARM_MAX_FACTOR,
TEMPORARY_TTL,
IDLE_TIMEOUT,
WATCHER_LOGBUF_SIZE,
WORKER_LOGBUF_SIZE,
SLAB_SIZES,
SLAB_CHUNK_MAX,
TRACK_SIZES,
NO_INLINE_ASCII_RESP,
MODERN,
NO_MODERN,
NO_CHUNKED_ITEMS,
NO_SLAB_REASSIGN,
NO_SLAB_AUTOMOVE,
NO_MAXCONNS_FAST,
INLINE_ASCII_RESP,
NO_LRU_CRAWLER,
NO_LRU_MAINTAINER,
NO_DROP_PRIVILEGES,
DROP_PRIVILEGES,
RESP_OBJ_MEM_LIMIT,
READ_BUF_MEM_LIMIT,
#ifdef TLS
SSL_CERT,
SSL_KEY,
SSL_VERIFY_MODE,
SSL_KEYFORM,
SSL_CIPHERS,
SSL_CA_CERT,
SSL_WBUF_SIZE,
#endif
#ifdef MEMCACHED_DEBUG
RELAXED_PRIVILEGES,
#endif
#ifdef EXTSTORE
EXT_PAGE_SIZE,
EXT_WBUF_SIZE,
EXT_THREADS,
EXT_IO_DEPTH,
EXT_PATH,
EXT_ITEM_SIZE,
EXT_ITEM_AGE,
EXT_LOW_TTL,
EXT_RECACHE_RATE,
EXT_COMPACT_UNDER,
EXT_DROP_UNDER,
EXT_MAX_FRAG,
EXT_DROP_UNREAD,
SLAB_AUTOMOVE_FREERATIO,
#endif
};
char *const subopts_tokens[] = {
[MAXCONNS_FAST] = "maxconns_fast",
[HASHPOWER_INIT] = "hashpower",
[NO_HASHEXPAND] = "no_hashexpand",
[SLAB_REASSIGN] = "slab_reassign",
[SLAB_AUTOMOVE] = "slab_automove",
[SLAB_AUTOMOVE_RATIO] = "slab_automove_ratio",
[SLAB_AUTOMOVE_WINDOW] = "slab_automove_window",
[TAIL_REPAIR_TIME] = "tail_repair_time",
[HASH_ALGORITHM] = "hash_algorithm",
[LRU_CRAWLER] = "lru_crawler",
[LRU_CRAWLER_SLEEP] = "lru_crawler_sleep",
[LRU_CRAWLER_TOCRAWL] = "lru_crawler_tocrawl",
[LRU_MAINTAINER] = "lru_maintainer",
[HOT_LRU_PCT] = "hot_lru_pct",
[WARM_LRU_PCT] = "warm_lru_pct",
[HOT_MAX_FACTOR] = "hot_max_factor",
[WARM_MAX_FACTOR] = "warm_max_factor",
[TEMPORARY_TTL] = "temporary_ttl",
[IDLE_TIMEOUT] = "idle_timeout",
[WATCHER_LOGBUF_SIZE] = "watcher_logbuf_size",
[WORKER_LOGBUF_SIZE] = "worker_logbuf_size",
[SLAB_SIZES] = "slab_sizes",
[SLAB_CHUNK_MAX] = "slab_chunk_max",
[TRACK_SIZES] = "track_sizes",
[NO_INLINE_ASCII_RESP] = "no_inline_ascii_resp",
[MODERN] = "modern",
[NO_MODERN] = "no_modern",
[NO_CHUNKED_ITEMS] = "no_chunked_items",
[NO_SLAB_REASSIGN] = "no_slab_reassign",
[NO_SLAB_AUTOMOVE] = "no_slab_automove",
[NO_MAXCONNS_FAST] = "no_maxconns_fast",
[INLINE_ASCII_RESP] = "inline_ascii_resp",
[NO_LRU_CRAWLER] = "no_lru_crawler",
[NO_LRU_MAINTAINER] = "no_lru_maintainer",
[NO_DROP_PRIVILEGES] = "no_drop_privileges",
[DROP_PRIVILEGES] = "drop_privileges",
[RESP_OBJ_MEM_LIMIT] = "resp_obj_mem_limit",
[READ_BUF_MEM_LIMIT] = "read_buf_mem_limit",
#ifdef TLS
[SSL_CERT] = "ssl_chain_cert",
[SSL_KEY] = "ssl_key",
[SSL_VERIFY_MODE] = "ssl_verify_mode",
[SSL_KEYFORM] = "ssl_keyformat",
[SSL_CIPHERS] = "ssl_ciphers",
[SSL_CA_CERT] = "ssl_ca_cert",
[SSL_WBUF_SIZE] = "ssl_wbuf_size",
#endif
#ifdef MEMCACHED_DEBUG
[RELAXED_PRIVILEGES] = "relaxed_privileges",
#endif
#ifdef EXTSTORE
[EXT_PAGE_SIZE] = "ext_page_size",
[EXT_WBUF_SIZE] = "ext_wbuf_size",
[EXT_THREADS] = "ext_threads",
[EXT_IO_DEPTH] = "ext_io_depth",
[EXT_PATH] = "ext_path",
[EXT_ITEM_SIZE] = "ext_item_size",
[EXT_ITEM_AGE] = "ext_item_age",
[EXT_LOW_TTL] = "ext_low_ttl",
[EXT_RECACHE_RATE] = "ext_recache_rate",
[EXT_COMPACT_UNDER] = "ext_compact_under",
[EXT_DROP_UNDER] = "ext_drop_under",
[EXT_MAX_FRAG] = "ext_max_frag",
[EXT_DROP_UNREAD] = "ext_drop_unread",
[SLAB_AUTOMOVE_FREERATIO] = "slab_automove_freeratio",
#endif
NULL
};
if (!sanitycheck()) {
free(meta);
return EX_OSERR;
}
/* handle SIGINT, SIGTERM */
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
signal(SIGHUP, sighup_handler);
signal(SIGUSR1, sig_usrhandler);
/* init settings */
settings_init();
verify_default("hash_algorithm", hash_type == MURMUR3_HASH);
#ifdef EXTSTORE
settings.ext_item_size = 512;
settings.ext_item_age = UINT_MAX;
settings.ext_low_ttl = 0;
settings.ext_recache_rate = 2000;
settings.ext_max_frag = 0.8;
settings.ext_drop_unread = false;
settings.ext_wbuf_size = 1024 * 1024 * 4;
settings.ext_compact_under = 0;
settings.ext_drop_under = 0;
settings.slab_automove_freeratio = 0.01;
settings.ext_page_size = 1024 * 1024 * 64;
settings.ext_io_threadcount = 1;
ext_cf.page_size = settings.ext_page_size;
ext_cf.wbuf_size = settings.ext_wbuf_size;
ext_cf.io_threadcount = settings.ext_io_threadcount;
ext_cf.io_depth = 1;
ext_cf.page_buckets = 4;
ext_cf.wbuf_count = ext_cf.page_buckets;
#endif
/* Run regardless of initializing it later */
init_lru_maintainer();
/* set stderr non-buffering (for running under, say, daemontools) */
setbuf(stderr, NULL);
char *shortopts =
"a:" /* access mask for unix socket */
"A" /* enable admin shutdown command */
"Z" /* enable SSL */
"p:" /* TCP port number to listen on */
"s:" /* unix socket path to listen on */
"U:" /* UDP port number to listen on */
"m:" /* max memory to use for items in megabytes */
"M" /* return error on memory exhausted */
"c:" /* max simultaneous connections */
"k" /* lock down all paged memory */
"hiV" /* help, licence info, version */
"r" /* maximize core file limit */
"v" /* verbose */
"d" /* daemon mode */
"l:" /* interface to listen on */
"u:" /* user identity to run as */
"P:" /* save PID in file */
"f:" /* factor? */
"n:" /* minimum space allocated for key+value+flags */
"t:" /* threads */
"D:" /* prefix delimiter? */
"L" /* Large memory pages */
"R:" /* max requests per event */
"C" /* Disable use of CAS */
"b:" /* backlog queue limit */
"B:" /* Binding protocol */
"I:" /* Max item size */
"S" /* Sasl ON */
"F" /* Disable flush_all */
"X" /* Disable dump commands */
"W" /* Disable watch commands */
"Y:" /* Enable token auth */
"e:" /* mmap path for external item memory */
"o:" /* Extended generic options */
;
/* process arguments */
#ifdef HAVE_GETOPT_LONG
const struct option longopts[] = {
{"unix-mask", required_argument, 0, 'a'},
{"enable-shutdown", no_argument, 0, 'A'},
{"enable-ssl", no_argument, 0, 'Z'},
{"port", required_argument, 0, 'p'},
{"unix-socket", required_argument, 0, 's'},
{"udp-port", required_argument, 0, 'U'},
{"memory-limit", required_argument, 0, 'm'},
{"disable-evictions", no_argument, 0, 'M'},
{"conn-limit", required_argument, 0, 'c'},
{"lock-memory", no_argument, 0, 'k'},
{"help", no_argument, 0, 'h'},
{"license", no_argument, 0, 'i'},
{"version", no_argument, 0, 'V'},
{"enable-coredumps", no_argument, 0, 'r'},
{"verbose", optional_argument, 0, 'v'},
{"daemon", no_argument, 0, 'd'},
{"listen", required_argument, 0, 'l'},
{"user", required_argument, 0, 'u'},
{"pidfile", required_argument, 0, 'P'},
{"slab-growth-factor", required_argument, 0, 'f'},
{"slab-min-size", required_argument, 0, 'n'},
{"threads", required_argument, 0, 't'},
{"enable-largepages", no_argument, 0, 'L'},
{"max-reqs-per-event", required_argument, 0, 'R'},
{"disable-cas", no_argument, 0, 'C'},
{"listen-backlog", required_argument, 0, 'b'},
{"protocol", required_argument, 0, 'B'},
{"max-item-size", required_argument, 0, 'I'},
{"enable-sasl", no_argument, 0, 'S'},
{"disable-flush-all", no_argument, 0, 'F'},
{"disable-dumping", no_argument, 0, 'X'},
{"disable-watch", no_argument, 0, 'W'},
{"auth-file", required_argument, 0, 'Y'},
{"memory-file", required_argument, 0, 'e'},
{"extended", required_argument, 0, 'o'},
{0, 0, 0, 0}
};
int optindex;
while (-1 != (c = getopt_long(argc, argv, shortopts,
longopts, &optindex))) {
#else
while (-1 != (c = getopt(argc, argv, shortopts))) {
#endif
switch (c) {
case 'A':
/* enables "shutdown" command */
settings.shutdown_command = true;
break;
case 'Z':
/* enable secure communication*/
#ifdef TLS
settings.ssl_enabled = true;
#else
fprintf(stderr, "This server is not built with TLS support.\n");
exit(EX_USAGE);
#endif
break;
case 'a':
/* access for unix domain socket, as octal mask (like chmod)*/
settings.access= strtol(optarg,NULL,8);
break;
case 'U':
settings.udpport = atoi(optarg);
udp_specified = true;
break;
case 'p':
settings.port = atoi(optarg);
tcp_specified = true;
break;
case 's':
settings.socketpath = optarg;
break;
case 'm':
settings.maxbytes = ((size_t)atoi(optarg)) * 1024 * 1024;
break;
case 'M':
settings.evict_to_free = 0;
break;
case 'c':
settings.maxconns = atoi(optarg);
if (settings.maxconns <= 0) {
fprintf(stderr, "Maximum connections must be greater than 0\n");
return 1;
}
break;
case 'h':
usage();
exit(EXIT_SUCCESS);
case 'i':
usage_license();
exit(EXIT_SUCCESS);
case 'V':
printf(PACKAGE " " VERSION "\n");
exit(EXIT_SUCCESS);
case 'k':
lock_memory = true;
break;
case 'v':
settings.verbose++;
break;
case 'l':
if (settings.inter != NULL) {
if (strstr(settings.inter, optarg) != NULL) {
break;
}
size_t len = strlen(settings.inter) + strlen(optarg) + 2;
char *p = malloc(len);
if (p == NULL) {
fprintf(stderr, "Failed to allocate memory\n");
return 1;
}
snprintf(p, len, "%s,%s", settings.inter, optarg);
free(settings.inter);
settings.inter = p;
} else {
settings.inter= strdup(optarg);
}
break;
case 'd':
do_daemonize = true;
break;
case 'r':
maxcore = 1;
break;
case 'R':
settings.reqs_per_event = atoi(optarg);
if (settings.reqs_per_event == 0) {
fprintf(stderr, "Number of requests per event must be greater than 0\n");
return 1;
}
break;
case 'u':
username = optarg;
break;
case 'P':
pid_file = optarg;
break;
case 'e':
memory_file = optarg;
break;
case 'f':
settings.factor = atof(optarg);
if (settings.factor <= 1.0) {
fprintf(stderr, "Factor must be greater than 1\n");
return 1;
}
meta->slab_config = strdup(optarg);
break;
case 'n':
settings.chunk_size = atoi(optarg);
if (settings.chunk_size == 0) {
fprintf(stderr, "Chunk size must be greater than 0\n");
return 1;
}
break;
case 't':
settings.num_threads = atoi(optarg);
if (settings.num_threads <= 0) {
fprintf(stderr, "Number of threads must be greater than 0\n");
return 1;
}
/* There're other problems when you get above 64 threads.
* In the future we should portably detect # of cores for the
* default.
*/
if (settings.num_threads > 64) {
fprintf(stderr, "WARNING: Setting a high number of worker"
"threads is not recommended.\n"
" Set this value to the number of cores in"
" your machine or less.\n");
}
break;
case 'D':
if (! optarg || ! optarg[0]) {
fprintf(stderr, "No delimiter specified\n");
return 1;
}
settings.prefix_delimiter = optarg[0];
settings.detail_enabled = 1;
break;
case 'L' :
if (enable_large_pages() == 0) {
preallocate = true;
} else {
fprintf(stderr, "Cannot enable large pages on this system\n"
"(There is no support as of this version)\n");
return 1;
}
break;
case 'C' :
settings.use_cas = false;
break;
case 'b' :
settings.backlog = atoi(optarg);
break;
case 'B':
protocol_specified = true;
if (strcmp(optarg, "auto") == 0) {
settings.binding_protocol = negotiating_prot;
} else if (strcmp(optarg, "binary") == 0) {
settings.binding_protocol = binary_prot;
} else if (strcmp(optarg, "ascii") == 0) {
settings.binding_protocol = ascii_prot;
} else {
fprintf(stderr, "Invalid value for binding protocol: %s\n"
" -- should be one of auto, binary, or ascii\n", optarg);
exit(EX_USAGE);
}
break;
case 'I':
buf = strdup(optarg);
unit = buf[strlen(buf)-1];
if (unit == 'k' || unit == 'm' ||
unit == 'K' || unit == 'M') {
buf[strlen(buf)-1] = '\0';
size_max = atoi(buf);
if (unit == 'k' || unit == 'K')
size_max *= 1024;
if (unit == 'm' || unit == 'M')
size_max *= 1024 * 1024;
settings.item_size_max = size_max;
} else {
settings.item_size_max = atoi(buf);
}
free(buf);
break;
case 'S': /* set Sasl authentication to true. Default is false */
#ifndef ENABLE_SASL
fprintf(stderr, "This server is not built with SASL support.\n");
exit(EX_USAGE);
#endif
settings.sasl = true;
break;
case 'F' :
settings.flush_enabled = false;
break;
case 'X' :
settings.dump_enabled = false;
break;
case 'W' :
settings.watch_enabled = false;
break;
case 'Y' :
// dupe the file path now just in case the options get mangled.
settings.auth_file = strdup(optarg);
break;
case 'o': /* It's sub-opts time! */
subopts_orig = subopts = strdup(optarg); /* getsubopt() changes the original args */
while (*subopts != '\0') {
switch (getsubopt(&subopts, subopts_tokens, &subopts_value)) {
case MAXCONNS_FAST:
settings.maxconns_fast = true;
break;
case HASHPOWER_INIT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing numeric argument for hashpower\n");
return 1;
}
settings.hashpower_init = atoi(subopts_value);
if (settings.hashpower_init < 12) {
fprintf(stderr, "Initial hashtable multiplier of %d is too low\n",
settings.hashpower_init);
return 1;
} else if (settings.hashpower_init > 32) {
fprintf(stderr, "Initial hashtable multiplier of %d is too high\n"
"Choose a value based on \"STAT hash_power_level\" from a running instance\n",
settings.hashpower_init);
return 1;
}
break;
case NO_HASHEXPAND:
start_assoc_maint = false;
break;
case SLAB_REASSIGN:
settings.slab_reassign = true;
break;
case SLAB_AUTOMOVE:
if (subopts_value == NULL) {
settings.slab_automove = 1;
break;
}
settings.slab_automove = atoi(subopts_value);
if (settings.slab_automove < 0 || settings.slab_automove > 2) {
fprintf(stderr, "slab_automove must be between 0 and 2\n");
return 1;
}
break;
case SLAB_AUTOMOVE_RATIO:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_automove_ratio argument\n");
return 1;
}
settings.slab_automove_ratio = atof(subopts_value);
if (settings.slab_automove_ratio <= 0 || settings.slab_automove_ratio > 1) {
fprintf(stderr, "slab_automove_ratio must be > 0 and < 1\n");
return 1;
}
break;
case SLAB_AUTOMOVE_WINDOW:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_automove_window argument\n");
return 1;
}
settings.slab_automove_window = atoi(subopts_value);
if (settings.slab_automove_window < 3) {
fprintf(stderr, "slab_automove_window must be > 2\n");
return 1;
}
break;
case TAIL_REPAIR_TIME:
if (subopts_value == NULL) {
fprintf(stderr, "Missing numeric argument for tail_repair_time\n");
return 1;
}
settings.tail_repair_time = atoi(subopts_value);
if (settings.tail_repair_time < 10) {
fprintf(stderr, "Cannot set tail_repair_time to less than 10 seconds\n");
return 1;
}
break;
case HASH_ALGORITHM:
if (subopts_value == NULL) {
fprintf(stderr, "Missing hash_algorithm argument\n");
return 1;
};
if (strcmp(subopts_value, "jenkins") == 0) {
hash_type = JENKINS_HASH;
} else if (strcmp(subopts_value, "murmur3") == 0) {
hash_type = MURMUR3_HASH;
} else {
fprintf(stderr, "Unknown hash_algorithm option (jenkins, murmur3)\n");
return 1;
}
break;
case LRU_CRAWLER:
start_lru_crawler = true;
break;
case LRU_CRAWLER_SLEEP:
if (subopts_value == NULL) {
fprintf(stderr, "Missing lru_crawler_sleep value\n");
return 1;
}
settings.lru_crawler_sleep = atoi(subopts_value);
if (settings.lru_crawler_sleep > 1000000 || settings.lru_crawler_sleep < 0) {
fprintf(stderr, "LRU crawler sleep must be between 0 and 1 second\n");
return 1;
}
break;
case LRU_CRAWLER_TOCRAWL:
if (subopts_value == NULL) {
fprintf(stderr, "Missing lru_crawler_tocrawl value\n");
return 1;
}
if (!safe_strtoul(subopts_value, &tocrawl)) {
fprintf(stderr, "lru_crawler_tocrawl takes a numeric 32bit value\n");
return 1;
}
settings.lru_crawler_tocrawl = tocrawl;
break;
case LRU_MAINTAINER:
start_lru_maintainer = true;
settings.lru_segmented = true;
break;
case HOT_LRU_PCT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing hot_lru_pct argument\n");
return 1;
}
settings.hot_lru_pct = atoi(subopts_value);
if (settings.hot_lru_pct < 1 || settings.hot_lru_pct >= 80) {
fprintf(stderr, "hot_lru_pct must be > 1 and < 80\n");
return 1;
}
break;
case WARM_LRU_PCT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing warm_lru_pct argument\n");
return 1;
}
settings.warm_lru_pct = atoi(subopts_value);
if (settings.warm_lru_pct < 1 || settings.warm_lru_pct >= 80) {
fprintf(stderr, "warm_lru_pct must be > 1 and < 80\n");
return 1;
}
break;
case HOT_MAX_FACTOR:
if (subopts_value == NULL) {
fprintf(stderr, "Missing hot_max_factor argument\n");
return 1;
}
settings.hot_max_factor = atof(subopts_value);
if (settings.hot_max_factor <= 0) {
fprintf(stderr, "hot_max_factor must be > 0\n");
return 1;
}
break;
case WARM_MAX_FACTOR:
if (subopts_value == NULL) {
fprintf(stderr, "Missing warm_max_factor argument\n");
return 1;
}
settings.warm_max_factor = atof(subopts_value);
if (settings.warm_max_factor <= 0) {
fprintf(stderr, "warm_max_factor must be > 0\n");
return 1;
}
break;
case TEMPORARY_TTL:
if (subopts_value == NULL) {
fprintf(stderr, "Missing temporary_ttl argument\n");
return 1;
}
settings.temp_lru = true;
settings.temporary_ttl = atoi(subopts_value);
break;
case IDLE_TIMEOUT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing numeric argument for idle_timeout\n");
return 1;
}
settings.idle_timeout = atoi(subopts_value);
break;
case WATCHER_LOGBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing watcher_logbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.logger_watcher_buf_size)) {
fprintf(stderr, "could not parse argument to watcher_logbuf_size\n");
return 1;
}
settings.logger_watcher_buf_size *= 1024; /* kilobytes */
break;
case WORKER_LOGBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing worker_logbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.logger_buf_size)) {
fprintf(stderr, "could not parse argument to worker_logbuf_size\n");
return 1;
}
settings.logger_buf_size *= 1024; /* kilobytes */
case SLAB_SIZES:
slab_sizes_unparsed = strdup(subopts_value);
break;
case SLAB_CHUNK_MAX:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_chunk_max argument\n");
}
if (!safe_strtol(subopts_value, &settings.slab_chunk_size_max)) {
fprintf(stderr, "could not parse argument to slab_chunk_max\n");
}
slab_chunk_size_changed = true;
break;
case TRACK_SIZES:
item_stats_sizes_init();
break;
case NO_INLINE_ASCII_RESP:
break;
case INLINE_ASCII_RESP:
break;
case NO_CHUNKED_ITEMS:
settings.slab_chunk_size_max = settings.slab_page_size;
break;
case NO_SLAB_REASSIGN:
settings.slab_reassign = false;
break;
case NO_SLAB_AUTOMOVE:
settings.slab_automove = 0;
break;
case NO_MAXCONNS_FAST:
settings.maxconns_fast = false;
break;
case NO_LRU_CRAWLER:
settings.lru_crawler = false;
start_lru_crawler = false;
break;
case NO_LRU_MAINTAINER:
start_lru_maintainer = false;
settings.lru_segmented = false;
break;
#ifdef TLS
case SSL_CERT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_chain_cert argument\n");
return 1;
}
settings.ssl_chain_cert = strdup(subopts_value);
break;
case SSL_KEY:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_key argument\n");
return 1;
}
settings.ssl_key = strdup(subopts_value);
break;
case SSL_VERIFY_MODE:
{
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_verify_mode argument\n");
return 1;
}
int verify = 0;
if (!safe_strtol(subopts_value, &verify)) {
fprintf(stderr, "could not parse argument to ssl_verify_mode\n");
return 1;
}
switch(verify) {
case 0:
settings.ssl_verify_mode = SSL_VERIFY_NONE;
break;
case 1:
settings.ssl_verify_mode = SSL_VERIFY_PEER;
break;
case 2:
settings.ssl_verify_mode = SSL_VERIFY_PEER |
SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
break;
case 3:
settings.ssl_verify_mode = SSL_VERIFY_PEER |
SSL_VERIFY_FAIL_IF_NO_PEER_CERT |
SSL_VERIFY_CLIENT_ONCE;
break;
default:
fprintf(stderr, "Invalid ssl_verify_mode. Use help to see valid options.\n");
return 1;
}
break;
}
case SSL_KEYFORM:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_keyformat argument\n");
return 1;
}
if (!safe_strtol(subopts_value, &settings.ssl_keyformat)) {
fprintf(stderr, "could not parse argument to ssl_keyformat\n");
return 1;
}
break;
case SSL_CIPHERS:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_ciphers argument\n");
return 1;
}
settings.ssl_ciphers = strdup(subopts_value);
break;
case SSL_CA_CERT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_ca_cert argument\n");
return 1;
}
settings.ssl_ca_cert = strdup(subopts_value);
break;
case SSL_WBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ssl_wbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ssl_wbuf_size)) {
fprintf(stderr, "could not parse argument to ssl_wbuf_size\n");
return 1;
}
settings.ssl_wbuf_size *= 1024; /* kilobytes */
break;
#endif
#ifdef EXTSTORE
case EXT_PAGE_SIZE:
if (storage_file) {
fprintf(stderr, "Must specify ext_page_size before any ext_path arguments\n");
return 1;
}
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_page_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.page_size)) {
fprintf(stderr, "could not parse argument to ext_page_size\n");
return 1;
}
ext_cf.page_size *= 1024 * 1024; /* megabytes */
break;
case EXT_WBUF_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_wbuf_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.wbuf_size)) {
fprintf(stderr, "could not parse argument to ext_wbuf_size\n");
return 1;
}
ext_cf.wbuf_size *= 1024 * 1024; /* megabytes */
settings.ext_wbuf_size = ext_cf.wbuf_size;
break;
case EXT_THREADS:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_threads argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.io_threadcount)) {
fprintf(stderr, "could not parse argument to ext_threads\n");
return 1;
}
break;
case EXT_IO_DEPTH:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_io_depth argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &ext_cf.io_depth)) {
fprintf(stderr, "could not parse argument to ext_io_depth\n");
return 1;
}
break;
case EXT_ITEM_SIZE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_item_size argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_item_size)) {
fprintf(stderr, "could not parse argument to ext_item_size\n");
return 1;
}
break;
case EXT_ITEM_AGE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_item_age argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_item_age)) {
fprintf(stderr, "could not parse argument to ext_item_age\n");
return 1;
}
break;
case EXT_LOW_TTL:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_low_ttl argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_low_ttl)) {
fprintf(stderr, "could not parse argument to ext_low_ttl\n");
return 1;
}
break;
case EXT_RECACHE_RATE:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_recache_rate argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_recache_rate)) {
fprintf(stderr, "could not parse argument to ext_recache_rate\n");
return 1;
}
break;
case EXT_COMPACT_UNDER:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_compact_under argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_compact_under)) {
fprintf(stderr, "could not parse argument to ext_compact_under\n");
return 1;
}
break;
case EXT_DROP_UNDER:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_drop_under argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.ext_drop_under)) {
fprintf(stderr, "could not parse argument to ext_drop_under\n");
return 1;
}
break;
case EXT_MAX_FRAG:
if (subopts_value == NULL) {
fprintf(stderr, "Missing ext_max_frag argument\n");
return 1;
}
if (!safe_strtod(subopts_value, &settings.ext_max_frag)) {
fprintf(stderr, "could not parse argument to ext_max_frag\n");
return 1;
}
break;
case SLAB_AUTOMOVE_FREERATIO:
if (subopts_value == NULL) {
fprintf(stderr, "Missing slab_automove_freeratio argument\n");
return 1;
}
if (!safe_strtod(subopts_value, &settings.slab_automove_freeratio)) {
fprintf(stderr, "could not parse argument to slab_automove_freeratio\n");
return 1;
}
break;
case EXT_DROP_UNREAD:
settings.ext_drop_unread = true;
break;
case EXT_PATH:
if (subopts_value) {
struct extstore_conf_file *tmp = storage_conf_parse(subopts_value, ext_cf.page_size);
if (tmp == NULL) {
fprintf(stderr, "failed to parse ext_path argument\n");
return 1;
}
if (storage_file != NULL) {
tmp->next = storage_file;
}
storage_file = tmp;
} else {
fprintf(stderr, "missing argument to ext_path, ie: ext_path=/d/file:5G\n");
return 1;
}
break;
#endif
case MODERN:
/* currently no new defaults */
break;
case NO_MODERN:
if (!slab_chunk_size_changed) {
settings.slab_chunk_size_max = settings.slab_page_size;
}
settings.slab_reassign = false;
settings.slab_automove = 0;
settings.maxconns_fast = false;
settings.lru_segmented = false;
hash_type = JENKINS_HASH;
start_lru_crawler = false;
start_lru_maintainer = false;
break;
case NO_DROP_PRIVILEGES:
settings.drop_privileges = false;
break;
case DROP_PRIVILEGES:
settings.drop_privileges = true;
break;
case RESP_OBJ_MEM_LIMIT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing resp_obj_mem_limit argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.resp_obj_mem_limit)) {
fprintf(stderr, "could not parse argument to resp_obj_mem_limit\n");
return 1;
}
settings.resp_obj_mem_limit *= 1024 * 1024; /* megabytes */
break;
case READ_BUF_MEM_LIMIT:
if (subopts_value == NULL) {
fprintf(stderr, "Missing read_buf_mem_limit argument\n");
return 1;
}
if (!safe_strtoul(subopts_value, &settings.read_buf_mem_limit)) {
fprintf(stderr, "could not parse argument to read_buf_mem_limit\n");
return 1;
}
settings.read_buf_mem_limit *= 1024 * 1024; /* megabytes */
break;
#ifdef MEMCACHED_DEBUG
case RELAXED_PRIVILEGES:
settings.relaxed_privileges = true;
break;
#endif
default:
printf("Illegal suboption \"%s\"\n", subopts_value);
return 1;
}
}
free(subopts_orig);
break;
default:
fprintf(stderr, "Illegal argument \"%c\"\n", c);
return 1;
}
}
if (settings.item_size_max < ITEM_SIZE_MAX_LOWER_LIMIT) {
fprintf(stderr, "Item max size cannot be less than 1024 bytes.\n");
exit(EX_USAGE);
}
if (settings.item_size_max > (settings.maxbytes / 2)) {
fprintf(stderr, "Cannot set item size limit higher than 1/2 of memory max.\n");
exit(EX_USAGE);
}
if (settings.item_size_max > (ITEM_SIZE_MAX_UPPER_LIMIT)) {
fprintf(stderr, "Cannot set item size limit higher than a gigabyte.\n");
exit(EX_USAGE);
}
if (settings.item_size_max > 1024 * 1024) {
if (!slab_chunk_size_changed) {
// Ideal new default is 16k, but needs stitching.
settings.slab_chunk_size_max = settings.slab_page_size / 2;
}
}
if (settings.slab_chunk_size_max > settings.item_size_max) {
fprintf(stderr, "slab_chunk_max (bytes: %d) cannot be larger than -I (item_size_max %d)\n",
settings.slab_chunk_size_max, settings.item_size_max);
exit(EX_USAGE);
}
if (settings.item_size_max % settings.slab_chunk_size_max != 0) {
fprintf(stderr, "-I (item_size_max: %d) must be evenly divisible by slab_chunk_max (bytes: %d)\n",
settings.item_size_max, settings.slab_chunk_size_max);
exit(EX_USAGE);
}
if (settings.slab_page_size % settings.slab_chunk_size_max != 0) {
fprintf(stderr, "slab_chunk_max (bytes: %d) must divide evenly into %d (slab_page_size)\n",
settings.slab_chunk_size_max, settings.slab_page_size);
exit(EX_USAGE);
}
#ifdef EXTSTORE
if (storage_file) {
if (settings.item_size_max > ext_cf.wbuf_size) {
fprintf(stderr, "-I (item_size_max: %d) cannot be larger than ext_wbuf_size: %d\n",
settings.item_size_max, ext_cf.wbuf_size);
exit(EX_USAGE);
}
if (settings.udpport) {
fprintf(stderr, "Cannot use UDP with extstore enabled (-U 0 to disable)\n");
exit(EX_USAGE);
}
}
#endif
// Reserve this for the new default. If factor size hasn't changed, use
// new default.
/*if (settings.slab_chunk_size_max == 16384 && settings.factor == 1.25) {
settings.factor = 1.08;
}*/
if (slab_sizes_unparsed != NULL) {
// want the unedited string for restart code.
char *temp = strdup(slab_sizes_unparsed);
if (_parse_slab_sizes(slab_sizes_unparsed, slab_sizes)) {
use_slab_sizes = true;
if (meta->slab_config) {
free(meta->slab_config);
}
meta->slab_config = temp;
} else {
exit(EX_USAGE);
}
} else if (!meta->slab_config) {
// using the default factor.
meta->slab_config = "1.25";
}
if (settings.hot_lru_pct + settings.warm_lru_pct > 80) {
fprintf(stderr, "hot_lru_pct + warm_lru_pct cannot be more than 80%% combined\n");
exit(EX_USAGE);
}
if (settings.temp_lru && !start_lru_maintainer) {
fprintf(stderr, "temporary_ttl requires lru_maintainer to be enabled\n");
exit(EX_USAGE);
}
if (hash_init(hash_type) != 0) {
fprintf(stderr, "Failed to initialize hash_algorithm!\n");
exit(EX_USAGE);
}
/*
* Use one workerthread to serve each UDP port if the user specified
* multiple ports
*/
if (settings.inter != NULL && strchr(settings.inter, ',')) {
settings.num_threads_per_udp = 1;
} else {
settings.num_threads_per_udp = settings.num_threads;
}
if (settings.sasl) {
if (!protocol_specified) {
settings.binding_protocol = binary_prot;
} else {
if (settings.binding_protocol != binary_prot) {
fprintf(stderr, "ERROR: You cannot allow the ASCII protocol while using SASL.\n");
exit(EX_USAGE);
}
}
if (settings.udpport) {
fprintf(stderr, "ERROR: Cannot enable UDP while using binary SASL authentication.\n");
exit(EX_USAGE);
}
}
if (settings.auth_file) {
if (!protocol_specified) {
settings.binding_protocol = ascii_prot;
} else {
if (settings.binding_protocol != ascii_prot) {
fprintf(stderr, "ERROR: You cannot allow the BINARY protocol while using ascii authentication tokens.\n");
exit(EX_USAGE);
}
}
}
if (udp_specified && settings.udpport != 0 && !tcp_specified) {
settings.port = settings.udpport;
}
#ifdef TLS
/*
* Setup SSL if enabled
*/
if (settings.ssl_enabled) {
if (!settings.port) {
fprintf(stderr, "ERROR: You cannot enable SSL without a TCP port.\n");
exit(EX_USAGE);
}
// openssl init methods.
SSL_load_error_strings();
SSLeay_add_ssl_algorithms();
// Initiate the SSL context.
ssl_init();
}
#endif
if (maxcore != 0) {
struct rlimit rlim_new;
/*
* First try raising to infinity; if that fails, try bringing
* the soft limit to the hard.
*/
if (getrlimit(RLIMIT_CORE, &rlim) == 0) {
rlim_new.rlim_cur = rlim_new.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &rlim_new)!= 0) {
/* failed. try raising just to the old max */
rlim_new.rlim_cur = rlim_new.rlim_max = rlim.rlim_max;
(void)setrlimit(RLIMIT_CORE, &rlim_new);
}
}
/*
* getrlimit again to see what we ended up with. Only fail if
* the soft limit ends up 0, because then no core files will be
* created at all.
*/
if ((getrlimit(RLIMIT_CORE, &rlim) != 0) || rlim.rlim_cur == 0) {
fprintf(stderr, "failed to ensure corefile creation\n");
exit(EX_OSERR);
}
}
/*
* If needed, increase rlimits to allow as many connections
* as needed.
*/
if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
fprintf(stderr, "failed to getrlimit number of files\n");
exit(EX_OSERR);
} else {
rlim.rlim_cur = settings.maxconns;
rlim.rlim_max = settings.maxconns;
if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
fprintf(stderr, "failed to set rlimit for open files. Try starting as root or requesting smaller maxconns value.\n");
exit(EX_OSERR);
}
}
/* lose root privileges if we have them */
if (getuid() == 0 || geteuid() == 0) {
if (username == 0 || *username == '\0') {
fprintf(stderr, "can't run as root without the -u switch\n");
exit(EX_USAGE);
}
if ((pw = getpwnam(username)) == 0) {
fprintf(stderr, "can't find the user %s to switch to\n", username);
exit(EX_NOUSER);
}
if (setgroups(0, NULL) < 0) {
/* setgroups may fail with EPERM, indicating we are already in a
* minimally-privileged state. In that case we continue. For all
* other failure codes we exit.
*
* Note that errno is stored here because fprintf may change it.
*/
bool should_exit = errno != EPERM;
fprintf(stderr, "failed to drop supplementary groups: %s\n",
strerror(errno));
if (should_exit) {
exit(EX_OSERR);
}
}
if (setgid(pw->pw_gid) < 0 || setuid(pw->pw_uid) < 0) {
fprintf(stderr, "failed to assume identity of user %s\n", username);
exit(EX_OSERR);
}
}
/* Initialize Sasl if -S was specified */
if (settings.sasl) {
init_sasl();
}
/* daemonize if requested */
/* if we want to ensure our ability to dump core, don't chdir to / */
if (do_daemonize) {
if (sigignore(SIGHUP) == -1) {
perror("Failed to ignore SIGHUP");
}
if (daemonize(maxcore, settings.verbose) == -1) {
fprintf(stderr, "failed to daemon() in order to daemonize\n");
exit(EXIT_FAILURE);
}
}
/* lock paged memory if needed */
if (lock_memory) {
#ifdef HAVE_MLOCKALL
int res = mlockall(MCL_CURRENT | MCL_FUTURE);
if (res != 0) {
fprintf(stderr, "warning: -k invalid, mlockall() failed: %s\n",
strerror(errno));
}
#else
fprintf(stderr, "warning: -k invalid, mlockall() not supported on this platform. proceeding without.\n");
#endif
}
/* initialize main thread libevent instance */
#if defined(LIBEVENT_VERSION_NUMBER) && LIBEVENT_VERSION_NUMBER >= 0x02000101
/* If libevent version is larger/equal to 2.0.2-alpha, use newer version */
struct event_config *ev_config;
ev_config = event_config_new();
event_config_set_flag(ev_config, EVENT_BASE_FLAG_NOLOCK);
main_base = event_base_new_with_config(ev_config);
event_config_free(ev_config);
#else
/* Otherwise, use older API */
main_base = event_init();
#endif
/* Load initial auth file if required */
if (settings.auth_file) {
if (settings.udpport) {
fprintf(stderr, "Cannot use UDP with ascii authentication enabled (-U 0 to disable)\n");
exit(EX_USAGE);
}
switch (authfile_load(settings.auth_file)) {
case AUTHFILE_MISSING: // fall through.
case AUTHFILE_OPENFAIL:
vperror("Could not open authfile [%s] for reading", settings.auth_file);
exit(EXIT_FAILURE);
break;
case AUTHFILE_OOM:
fprintf(stderr, "Out of memory reading password file: %s", settings.auth_file);
exit(EXIT_FAILURE);
break;
case AUTHFILE_MALFORMED:
fprintf(stderr, "Authfile [%s] has a malformed entry. Should be 'user:password'", settings.auth_file);
exit(EXIT_FAILURE);
break;
case AUTHFILE_OK:
break;
}
}
/* initialize other stuff */
stats_init();
logger_init();
conn_init();
bool reuse_mem = false;
void *mem_base = NULL;
bool prefill = false;
if (memory_file != NULL) {
preallocate = true;
// Easier to manage memory if we prefill the global pool when reusing.
prefill = true;
restart_register("main", _mc_meta_load_cb, _mc_meta_save_cb, meta);
reuse_mem = restart_mmap_open(settings.maxbytes,
memory_file,
&mem_base);
// The "save" callback gets called when we're closing out the mmap,
// but we don't know what the mmap_base is until after we call open.
// So we pass the struct above but have to fill it in here so the
// data's available during the save routine.
meta->mmap_base = mem_base;
// Also, the callbacks for load() run before _open returns, so we
// should have the old base in 'meta' as of here.
}
// Initialize the hash table _after_ checking restart metadata.
// We override the hash table start argument with what was live
// previously, to avoid filling a huge set of items into a tiny hash
// table.
assoc_init(settings.hashpower_init);
#ifdef EXTSTORE
if (storage_file && reuse_mem) {
fprintf(stderr, "[restart] memory restart with extstore not presently supported.\n");
reuse_mem = false;
}
#endif
slabs_init(settings.maxbytes, settings.factor, preallocate,
use_slab_sizes ? slab_sizes : NULL, mem_base, reuse_mem);
#ifdef EXTSTORE
if (storage_file) {
enum extstore_res eres;
if (settings.ext_compact_under == 0) {
// If changing the default fraction (4), change the help text as well.
settings.ext_compact_under = storage_file->page_count / 4;
/* Only rescues non-COLD items if below this threshold */
settings.ext_drop_under = storage_file->page_count / 4;
}
crc32c_init();
/* Init free chunks to zero. */
for (int x = 0; x < MAX_NUMBER_OF_SLAB_CLASSES; x++) {
settings.ext_free_memchunks[x] = 0;
}
storage = extstore_init(storage_file, &ext_cf, &eres);
if (storage == NULL) {
fprintf(stderr, "Failed to initialize external storage: %s\n",
extstore_err(eres));
if (eres == EXTSTORE_INIT_OPEN_FAIL) {
perror("extstore open");
}
exit(EXIT_FAILURE);
}
ext_storage = storage;
/* page mover algorithm for extstore needs memory prefilled */
prefill = true;
}
#endif
if (settings.drop_privileges) {
setup_privilege_violations_handler();
}
if (prefill)
slabs_prefill_global();
/* In restartable mode and we've decided to issue a fixup on memory */
if (memory_file != NULL && reuse_mem) {
mc_ptr_t old_base = meta->old_base;
assert(old_base == meta->old_base);
// should've pulled in process_started from meta file.
process_started = meta->process_started;
// TODO: must be a more canonical way of serializing/deserializing
// pointers? passing through uint64_t should work, and we're not
// annotating the pointer with anything, but it's still slightly
// insane.
restart_fixup((void *)old_base);
}
/*
* ignore SIGPIPE signals; we can use errno == EPIPE if we
* need that information
*/
if (sigignore(SIGPIPE) == -1) {
perror("failed to ignore SIGPIPE; sigaction");
exit(EX_OSERR);
}
/* start up worker threads if MT mode */
#ifdef EXTSTORE
slabs_set_storage(storage);
memcached_thread_init(settings.num_threads, storage);
init_lru_crawler(storage);
#else
memcached_thread_init(settings.num_threads, NULL);
init_lru_crawler(NULL);
#endif
if (start_assoc_maint && start_assoc_maintenance_thread() == -1) {
exit(EXIT_FAILURE);
}
if (start_lru_crawler && start_item_crawler_thread() != 0) {
fprintf(stderr, "Failed to enable LRU crawler thread\n");
exit(EXIT_FAILURE);
}
#ifdef EXTSTORE
if (storage && start_storage_compact_thread(storage) != 0) {
fprintf(stderr, "Failed to start storage compaction thread\n");
exit(EXIT_FAILURE);
}
if (storage && start_storage_write_thread(storage) != 0) {
fprintf(stderr, "Failed to start storage writer thread\n");
exit(EXIT_FAILURE);
}
if (start_lru_maintainer && start_lru_maintainer_thread(storage) != 0) {
#else
if (start_lru_maintainer && start_lru_maintainer_thread(NULL) != 0) {
#endif
fprintf(stderr, "Failed to enable LRU maintainer thread\n");
free(meta);
return 1;
}
if (settings.slab_reassign &&
start_slab_maintenance_thread() == -1) {
exit(EXIT_FAILURE);
}
if (settings.idle_timeout && start_conn_timeout_thread() == -1) {
exit(EXIT_FAILURE);
}
/* initialise clock event */
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
monotonic = true;
monotonic_start = ts.tv_sec;
// Monotonic clock needs special handling for restarts.
// We get a start time at an arbitrary place, so we need to
// restore the original time delta, which is always "now" - _start
if (reuse_mem) {
// the running timespan at stop time + the time we think we
// were stopped.
monotonic_start -= meta->current_time + meta->time_delta;
} else {
monotonic_start -= ITEM_UPDATE_INTERVAL + 2;
}
}
}
#endif
clock_handler(0, 0, 0);
/* create unix mode sockets after dropping privileges */
if (settings.socketpath != NULL) {
errno = 0;
if (server_socket_unix(settings.socketpath,settings.access)) {
vperror("failed to listen on UNIX socket: %s", settings.socketpath);
exit(EX_OSERR);
}
}
/* create the listening socket, bind it, and init */
if (settings.socketpath == NULL) {
const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME");
char *temp_portnumber_filename = NULL;
size_t len;
FILE *portnumber_file = NULL;
if (portnumber_filename != NULL) {
len = strlen(portnumber_filename)+4+1;
temp_portnumber_filename = malloc(len);
snprintf(temp_portnumber_filename,
len,
"%s.lck", portnumber_filename);
portnumber_file = fopen(temp_portnumber_filename, "a");
if (portnumber_file == NULL) {
fprintf(stderr, "Failed to open \"%s\": %s\n",
temp_portnumber_filename, strerror(errno));
}
}
errno = 0;
if (settings.port && server_sockets(settings.port, tcp_transport,
portnumber_file)) {
vperror("failed to listen on TCP port %d", settings.port);
exit(EX_OSERR);
}
/*
* initialization order: first create the listening sockets
* (may need root on low ports), then drop root if needed,
* then daemonize if needed, then init libevent (in some cases
* descriptors created by libevent wouldn't survive forking).
*/
/* create the UDP listening socket and bind it */
errno = 0;
if (settings.udpport && server_sockets(settings.udpport, udp_transport,
portnumber_file)) {
vperror("failed to listen on UDP port %d", settings.udpport);
exit(EX_OSERR);
}
if (portnumber_file) {
fclose(portnumber_file);
rename(temp_portnumber_filename, portnumber_filename);
}
if (temp_portnumber_filename)
free(temp_portnumber_filename);
}
/* Give the sockets a moment to open. I know this is dumb, but the error
* is only an advisory.
*/
usleep(1000);
if (stats_state.curr_conns + stats_state.reserved_fds >= settings.maxconns - 1) {
fprintf(stderr, "Maxconns setting is too low, use -c to increase.\n");
exit(EXIT_FAILURE);
}
if (pid_file != NULL) {
save_pid(pid_file);
}
/* Drop privileges no longer needed */
if (settings.drop_privileges) {
drop_privileges();
}
/* Initialize the uriencode lookup table. */
uriencode_init();
/* enter the event loop */
while (!stop_main_loop) {
if (event_base_loop(main_base, EVLOOP_ONCE) != 0) {
retval = EXIT_FAILURE;
break;
}
}
fprintf(stderr, "Gracefully stopping\n");
stop_threads();
int i;
// FIXME: make a function callable from threads.c
for (i = 0; i < max_fds; i++) {
if (conns[i] && conns[i]->state != conn_closed) {
conn_close(conns[i]);
}
}
if (memory_file != NULL) {
restart_mmap_close();
}
/* remove the PID file if we're a daemon */
if (do_daemonize)
remove_pidfile(pid_file);
/* Clean up strdup() call for bind() address */
if (settings.inter)
free(settings.inter);
/* cleanup base */
event_base_free(main_base);
free(meta);
return retval;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3888_0 |
crossvul-cpp_data_good_4697_1 | /*
* irc-nick.c - nick management for IRC plugin
*
* Copyright (C) 2003-2020 Sébastien Helleu <flashcode@flashtux.org>
*
* This file is part of WeeChat, the extensible chat client.
*
* WeeChat is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* WeeChat is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with WeeChat. If not, see <https://www.gnu.org/licenses/>.
*/
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include "../weechat-plugin.h"
#include "irc.h"
#include "irc-nick.h"
#include "irc-color.h"
#include "irc-config.h"
#include "irc-mode.h"
#include "irc-server.h"
#include "irc-channel.h"
/*
* Checks if a nick pointer is valid.
*
* Returns:
* 1: nick exists in channel
* 0: nick does not exist in channel
*/
int
irc_nick_valid (struct t_irc_channel *channel, struct t_irc_nick *nick)
{
struct t_irc_nick *ptr_nick;
if (!channel || !nick)
return 0;
for (ptr_nick = channel->nicks; ptr_nick; ptr_nick = ptr_nick->next_nick)
{
if (ptr_nick == nick)
return 1;
}
/* nick not found */
return 0;
}
/*
* Checks if string is a valid nick string (RFC 1459).
*
* Returns:
* 1: string is a valid nick
* 0: string is not a valid nick
*/
int
irc_nick_is_nick (const char *string)
{
const char *ptr;
if (!string || !string[0])
return 0;
/* first char must not be a number or hyphen */
ptr = string;
if (strchr ("0123456789-", *ptr))
return 0;
while (ptr && ptr[0])
{
if (!strchr (IRC_NICK_VALID_CHARS, *ptr))
return 0;
ptr++;
}
return 1;
}
/*
* Finds a color code for a nick (according to nick letters).
*
* Returns a WeeChat color code (that can be used for display).
*/
char *
irc_nick_find_color (const char *nickname)
{
return weechat_info_get ("nick_color", nickname);
}
/*
* Finds a color name for a nick (according to nick letters).
*
* Returns the name of a color (for example: "green").
*/
char *
irc_nick_find_color_name (const char *nickname)
{
return weechat_info_get ("nick_color_name", nickname);
}
/*
* Sets current prefix, using higher prefix set in prefixes.
*/
void
irc_nick_set_current_prefix (struct t_irc_nick *nick)
{
char *ptr_prefixes;
if (!nick)
return;
nick->prefix[0] = ' ';
for (ptr_prefixes = nick->prefixes; ptr_prefixes[0]; ptr_prefixes++)
{
if (ptr_prefixes[0] != ' ')
{
nick->prefix[0] = ptr_prefixes[0];
break;
}
}
}
/*
* Sets/unsets a prefix in prefixes.
*
* If set == 1, sets prefix (prefix is used).
* If set == 0, unsets prefix (space is used).
*/
void
irc_nick_set_prefix (struct t_irc_server *server, struct t_irc_nick *nick,
int set, char prefix)
{
int index;
if (!nick)
return;
index = irc_server_get_prefix_char_index (server, prefix);
if (index >= 0)
{
nick->prefixes[index] = (set) ? prefix : ' ';
irc_nick_set_current_prefix (nick);
}
}
/*
* Sets prefixes for nick.
*/
void
irc_nick_set_prefixes (struct t_irc_server *server, struct t_irc_nick *nick,
const char *prefixes)
{
const char *ptr_prefixes;
if (!nick)
return;
/* reset all prefixes in nick */
memset (nick->prefixes, ' ', strlen (nick->prefixes));
/* add prefixes in nick */
if (prefixes)
{
for (ptr_prefixes = prefixes; ptr_prefixes[0]; ptr_prefixes++)
{
irc_nick_set_prefix (server, nick, 1, ptr_prefixes[0]);
}
}
/* set current prefix */
irc_nick_set_current_prefix (nick);
}
/*
* Sets host for nick.
*/
void
irc_nick_set_host (struct t_irc_nick *nick, const char *host)
{
if (!nick)
return;
/* if host is the same, just return */
if ((!nick->host && !host)
|| (nick->host && host && strcmp (nick->host, host) == 0))
{
return;
}
/* update the host in nick */
if (nick->host)
free (nick->host);
nick->host = (host) ? strdup (host) : NULL;
}
/*
* Checks if nick is "op" (or better than "op", for example channel admin or
* channel owner).
*
* Returns:
* 1: nick is "op" (or better)
* 0: nick is not op
*/
int
irc_nick_is_op (struct t_irc_server *server, struct t_irc_nick *nick)
{
int index;
if (nick->prefix[0] == ' ')
return 0;
index = irc_server_get_prefix_char_index (server, nick->prefix[0]);
if (index < 0)
return 0;
return (index <= irc_server_get_prefix_mode_index (server, 'o')) ? 1 : 0;
}
/*
* Checks if nick prefixes contains prefix for a given mode.
*
* For example if prefix_mode is 'o', searches for '@' in nick prefixes.
*
* Returns:
* 1: prefixes contains prefix for the given mode
* 0: prefixes does not contain prefix for the given mode.
*/
int
irc_nick_has_prefix_mode (struct t_irc_server *server, struct t_irc_nick *nick,
char prefix_mode)
{
char prefix_char;
prefix_char = irc_server_get_prefix_char_for_mode (server, prefix_mode);
if (prefix_char == ' ')
return 0;
return (strchr (nick->prefixes, prefix_char)) ? 1 : 0;
}
/*
* Gets nicklist group for a nick.
*/
struct t_gui_nick_group *
irc_nick_get_nicklist_group (struct t_irc_server *server,
struct t_gui_buffer *buffer,
struct t_irc_nick *nick)
{
int index;
char str_group[2];
const char *prefix_modes;
struct t_gui_nick_group *ptr_group;
if (!server || !buffer || !nick)
return NULL;
ptr_group = NULL;
index = irc_server_get_prefix_char_index (server, nick->prefix[0]);
if (index < 0)
{
ptr_group = weechat_nicklist_search_group (buffer, NULL,
IRC_NICK_GROUP_OTHER_NAME);
}
else
{
prefix_modes = irc_server_get_prefix_modes (server);
str_group[0] = prefix_modes[index];
str_group[1] = '\0';
ptr_group = weechat_nicklist_search_group (buffer, NULL, str_group);
}
return ptr_group;
}
/*
* Gets name of prefix color for a nick.
*/
const char *
irc_nick_get_prefix_color_name (struct t_irc_server *server, char prefix)
{
static char *default_color = "";
const char *prefix_modes, *color;
char mode[2];
int index;
if (irc_config_hashtable_nick_prefixes)
{
mode[0] = ' ';
mode[1] = '\0';
index = irc_server_get_prefix_char_index (server, prefix);
if (index >= 0)
{
prefix_modes = irc_server_get_prefix_modes (server);
mode[0] = prefix_modes[index];
color = weechat_hashtable_get (irc_config_hashtable_nick_prefixes,
mode);
if (color)
return color;
}
/* fallback to "*" if no color is found with mode */
mode[0] = '*';
color = weechat_hashtable_get (irc_config_hashtable_nick_prefixes,
mode);
if (color)
return color;
}
/* no color by default */
return default_color;
}
/*
* Gets nick color for nicklist.
*/
char *
irc_nick_get_color_for_nicklist (struct t_irc_server *server,
struct t_irc_nick *nick)
{
static char *nick_color_bar_fg = "bar_fg";
static char *nick_color_self = "weechat.color.chat_nick_self";
static char *nick_color_away = "weechat.color.nicklist_away";
if (nick->away)
return strdup (nick_color_away);
if (weechat_config_boolean (irc_config_look_color_nicks_in_nicklist))
{
if (irc_server_strcasecmp (server, nick->name, server->nick) == 0)
return strdup (nick_color_self);
else
return irc_nick_find_color_name (nick->name);
}
return strdup (nick_color_bar_fg);
}
/*
* Adds a nick to buffer nicklist.
*/
void
irc_nick_nicklist_add (struct t_irc_server *server,
struct t_irc_channel *channel,
struct t_irc_nick *nick)
{
struct t_gui_nick_group *ptr_group;
char *color;
ptr_group = irc_nick_get_nicklist_group (server, channel->buffer, nick);
color = irc_nick_get_color_for_nicklist (server, nick);
weechat_nicklist_add_nick (channel->buffer, ptr_group,
nick->name,
color,
nick->prefix,
irc_nick_get_prefix_color_name (server, nick->prefix[0]),
1);
if (color)
free (color);
}
/*
* Removes a nick from buffer nicklist.
*/
void
irc_nick_nicklist_remove (struct t_irc_server *server,
struct t_irc_channel *channel,
struct t_irc_nick *nick)
{
struct t_gui_nick_group *ptr_group;
ptr_group = irc_nick_get_nicklist_group (server, channel->buffer, nick);
weechat_nicklist_remove_nick (channel->buffer,
weechat_nicklist_search_nick (channel->buffer,
ptr_group,
nick->name));
}
/*
* Sets a property for nick in buffer nicklist.
*/
void
irc_nick_nicklist_set (struct t_irc_channel *channel,
struct t_irc_nick *nick,
const char *property, const char *value)
{
struct t_gui_nick *ptr_nick;
ptr_nick = weechat_nicklist_search_nick (channel->buffer, NULL, nick->name);
if (ptr_nick)
{
weechat_nicklist_nick_set (channel->buffer, ptr_nick, property, value);
}
}
/*
* Sets nick prefix colors in nicklist for all servers/channels.
*/
void
irc_nick_nicklist_set_prefix_color_all ()
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
struct t_irc_nick *ptr_nick;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
for (ptr_nick = ptr_channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
irc_nick_nicklist_set (ptr_channel, ptr_nick, "prefix_color",
irc_nick_get_prefix_color_name (ptr_server,
ptr_nick->prefix[0]));
}
}
}
}
/*
* Sets nick colors in nicklist for all servers/channels.
*/
void
irc_nick_nicklist_set_color_all ()
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
struct t_irc_nick *ptr_nick;
char *color;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
for (ptr_nick = ptr_channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
color = irc_nick_get_color_for_nicklist (ptr_server, ptr_nick);
irc_nick_nicklist_set (ptr_channel, ptr_nick, "color", color);
if (color)
free (color);
}
}
}
}
/*
* Adds a new nick in channel.
*
* Returns pointer to new nick, NULL if error.
*/
struct t_irc_nick *
irc_nick_new (struct t_irc_server *server, struct t_irc_channel *channel,
const char *nickname, const char *host, const char *prefixes,
int away, const char *account, const char *realname)
{
struct t_irc_nick *new_nick, *ptr_nick;
int length;
if (!nickname || !nickname[0])
return NULL;
if (!channel->nicks)
irc_channel_add_nicklist_groups (server, channel);
/* nick already exists on this channel? */
ptr_nick = irc_nick_search (server, channel, nickname);
if (ptr_nick)
{
/* remove old nick from nicklist */
irc_nick_nicklist_remove (server, channel, ptr_nick);
/* update nick prefixes */
irc_nick_set_prefixes (server, ptr_nick, prefixes);
/* add new nick in nicklist */
irc_nick_nicklist_add (server, channel, ptr_nick);
return ptr_nick;
}
/* alloc memory for new nick */
if ((new_nick = malloc (sizeof (*new_nick))) == NULL)
return NULL;
/* initialize new nick */
new_nick->name = strdup (nickname);
new_nick->host = (host) ? strdup (host) : NULL;
new_nick->account = (account) ? strdup (account) : NULL;
new_nick->realname = (realname) ? strdup (realname) : NULL;
length = strlen (irc_server_get_prefix_chars (server));
new_nick->prefixes = malloc (length + 1);
new_nick->prefix = malloc (2);
if (!new_nick->name || !new_nick->prefixes || !new_nick->prefix)
{
if (new_nick->name)
free (new_nick->name);
if (new_nick->host)
free (new_nick->host);
if (new_nick->account)
free (new_nick->account);
if (new_nick->realname)
free (new_nick->realname);
if (new_nick->prefixes)
free (new_nick->prefixes);
if (new_nick->prefix)
free (new_nick->prefix);
free (new_nick);
return NULL;
}
memset (new_nick->prefixes, ' ', length);
new_nick->prefixes[length] = '\0';
new_nick->prefix[0] = ' ';
new_nick->prefix[1] = '\0';
irc_nick_set_prefixes (server, new_nick, prefixes);
new_nick->away = away;
if (irc_server_strcasecmp (server, new_nick->name, server->nick) == 0)
new_nick->color = strdup (IRC_COLOR_CHAT_NICK_SELF);
else
new_nick->color = irc_nick_find_color (new_nick->name);
/* add nick to end of list */
new_nick->prev_nick = channel->last_nick;
if (channel->last_nick)
channel->last_nick->next_nick = new_nick;
else
channel->nicks = new_nick;
channel->last_nick = new_nick;
new_nick->next_nick = NULL;
channel->nicks_count++;
channel->nick_completion_reset = 1;
/* add nick to buffer nicklist */
irc_nick_nicklist_add (server, channel, new_nick);
/* all is OK, return address of new nick */
return new_nick;
}
/*
* Changes nickname.
*/
void
irc_nick_change (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick, const char *new_nick)
{
int nick_is_me;
/* remove nick from nicklist */
irc_nick_nicklist_remove (server, channel, nick);
/* update nicks speaking */
nick_is_me = (irc_server_strcasecmp (server, new_nick, server->nick) == 0) ? 1 : 0;
if (!nick_is_me)
irc_channel_nick_speaking_rename (channel, nick->name, new_nick);
/* change nickname */
if (nick->name)
free (nick->name);
nick->name = strdup (new_nick);
if (nick->color)
free (nick->color);
if (nick_is_me)
nick->color = strdup (IRC_COLOR_CHAT_NICK_SELF);
else
nick->color = irc_nick_find_color (nick->name);
/* add nick in nicklist */
irc_nick_nicklist_add (server, channel, nick);
}
/*
* Sets a mode for a nick.
*/
void
irc_nick_set_mode (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick, int set, char mode)
{
int index;
const char *prefix_chars;
index = irc_server_get_prefix_mode_index (server, mode);
if (index < 0)
return;
/* remove nick from nicklist */
irc_nick_nicklist_remove (server, channel, nick);
/* set flag */
prefix_chars = irc_server_get_prefix_chars (server);
irc_nick_set_prefix (server, nick, set, prefix_chars[index]);
/* add nick in nicklist */
irc_nick_nicklist_add (server, channel, nick);
if (irc_server_strcasecmp (server, nick->name, server->nick) == 0)
{
weechat_bar_item_update ("input_prompt");
weechat_bar_item_update ("irc_nick");
weechat_bar_item_update ("irc_nick_host");
}
}
/*
* Reallocates the "prefixes" string in all nicks of all channels on the server
* (after 005 has been received).
*/
void
irc_nick_realloc_prefixes (struct t_irc_server *server,
int old_length, int new_length)
{
struct t_irc_channel *ptr_channel;
struct t_irc_nick *ptr_nick;
char *new_prefixes;
for (ptr_channel = server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
for (ptr_nick = ptr_channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
if (ptr_nick->prefixes)
{
new_prefixes = realloc (ptr_nick->prefixes, new_length + 1);
if (new_prefixes)
{
ptr_nick->prefixes = new_prefixes;
if (new_length > old_length)
{
memset (ptr_nick->prefixes + old_length,
' ',
new_length - old_length);
}
ptr_nick->prefixes[new_length] = '\0';
}
}
else
{
ptr_nick->prefixes = malloc (new_length + 1);
if (ptr_nick->prefixes)
{
memset (ptr_nick->prefixes, ' ', new_length);
ptr_nick->prefixes[new_length] = '\0';
}
}
}
}
}
/*
* Removes a nick from a channel.
*/
void
irc_nick_free (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick)
{
struct t_irc_nick *new_nicks;
if (!channel || !nick)
return;
/* remove nick from nicklist */
irc_nick_nicklist_remove (server, channel, nick);
/* remove nick */
if (channel->last_nick == nick)
channel->last_nick = nick->prev_nick;
if (nick->prev_nick)
{
(nick->prev_nick)->next_nick = nick->next_nick;
new_nicks = channel->nicks;
}
else
new_nicks = nick->next_nick;
if (nick->next_nick)
(nick->next_nick)->prev_nick = nick->prev_nick;
channel->nicks_count--;
/* free data */
if (nick->name)
free (nick->name);
if (nick->host)
free (nick->host);
if (nick->prefixes)
free (nick->prefixes);
if (nick->prefix)
free (nick->prefix);
if (nick->account)
free (nick->account);
if (nick->realname)
free (nick->realname);
if (nick->color)
free (nick->color);
free (nick);
channel->nicks = new_nicks;
channel->nick_completion_reset = 1;
}
/*
* Removes all nicks from a channel.
*/
void
irc_nick_free_all (struct t_irc_server *server, struct t_irc_channel *channel)
{
if (!channel)
return;
/* remove all nicks for the channel */
while (channel->nicks)
{
irc_nick_free (server, channel, channel->nicks);
}
/* remove all groups in nicklist */
weechat_nicklist_remove_all (channel->buffer);
/* should be zero, but prevent any bug :D */
channel->nicks_count = 0;
}
/*
* Searches for a nick in a channel.
*
* Returns pointer to nick found, NULL if error.
*/
struct t_irc_nick *
irc_nick_search (struct t_irc_server *server, struct t_irc_channel *channel,
const char *nickname)
{
struct t_irc_nick *ptr_nick;
if (!channel || !nickname)
return NULL;
for (ptr_nick = channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
if (irc_server_strcasecmp (server, ptr_nick->name, nickname) == 0)
return ptr_nick;
}
/* nick not found */
return NULL;
}
/*
* Returns number of nicks (total, op, halfop, voice, normal) on a channel.
*/
void
irc_nick_count (struct t_irc_server *server, struct t_irc_channel *channel,
int *total, int *count_op, int *count_halfop, int *count_voice,
int *count_normal)
{
struct t_irc_nick *ptr_nick;
(*total) = 0;
(*count_op) = 0;
(*count_halfop) = 0;
(*count_voice) = 0;
(*count_normal) = 0;
for (ptr_nick = channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
(*total)++;
if (irc_nick_is_op (server, ptr_nick))
(*count_op)++;
else
{
if (irc_nick_has_prefix_mode (server, ptr_nick, 'h'))
(*count_halfop)++;
else
{
if (irc_nick_has_prefix_mode (server, ptr_nick, 'v'))
(*count_voice)++;
else
(*count_normal)++;
}
}
}
}
/*
* Sets/unsets away status for a nick.
*/
void
irc_nick_set_away (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick, int is_away)
{
char *color;
if (is_away != nick->away)
{
nick->away = is_away;
color = irc_nick_get_color_for_nicklist (server, nick);
irc_nick_nicklist_set (channel, nick, "color", color);
if (color)
free (color);
}
}
/*
* Gets nick mode for display (color + mode).
*
* If prefix == 1, returns string for display in prefix, otherwise returns
* string for display in action message (/me).
*/
const char *
irc_nick_mode_for_display (struct t_irc_server *server, struct t_irc_nick *nick,
int prefix)
{
static char result[32];
char str_prefix[2];
int nick_mode;
const char *str_prefix_color;
str_prefix[0] = (nick) ? nick->prefix[0] : '\0';
str_prefix[1] = '\0';
nick_mode = weechat_config_integer (irc_config_look_nick_mode);
if ((nick_mode == IRC_CONFIG_LOOK_NICK_MODE_BOTH)
|| (prefix && (nick_mode == IRC_CONFIG_LOOK_NICK_MODE_PREFIX))
|| (!prefix && (nick_mode == IRC_CONFIG_LOOK_NICK_MODE_ACTION)))
{
if (nick)
{
if ((str_prefix[0] == ' ')
&& (!prefix || !weechat_config_boolean (irc_config_look_nick_mode_empty)))
{
str_prefix[0] = '\0';
}
str_prefix_color = weechat_color (
irc_nick_get_prefix_color_name (server, nick->prefix[0]));
}
else
{
str_prefix[0] = (prefix
&& weechat_config_boolean (irc_config_look_nick_mode_empty)) ?
' ' : '\0';
str_prefix_color = IRC_COLOR_RESET;
}
}
else
{
str_prefix[0] = '\0';
str_prefix_color = IRC_COLOR_RESET;
}
snprintf (result, sizeof (result), "%s%s", str_prefix_color, str_prefix);
return result;
}
/*
* Returns string with nick to display as prefix on buffer (returned string ends
* by a tab).
*/
const char *
irc_nick_as_prefix (struct t_irc_server *server, struct t_irc_nick *nick,
const char *nickname, const char *force_color)
{
static char result[256];
char *color;
if (force_color)
color = strdup (force_color);
else if (nick)
color = strdup (nick->color);
else if (nickname)
color = irc_nick_find_color (nickname);
else
color = strdup (IRC_COLOR_CHAT_NICK);
snprintf (result, sizeof (result), "%s%s%s\t",
irc_nick_mode_for_display (server, nick, 1),
color,
(nick) ? nick->name : nickname);
if (color)
free (color);
return result;
}
/*
* Returns WeeChat color code for a nick.
*/
const char *
irc_nick_color_for_msg (struct t_irc_server *server, int server_message,
struct t_irc_nick *nick, const char *nickname)
{
static char color[16][64];
static int index_color = 0;
char *color_found;
if (server_message
&& !weechat_config_boolean (irc_config_look_color_nicks_in_server_messages))
{
return IRC_COLOR_CHAT_NICK;
}
if (nick)
return nick->color;
if (nickname)
{
if (server
&& (irc_server_strcasecmp (server, nickname, server->nick) == 0))
{
return IRC_COLOR_CHAT_NICK_SELF;
}
color_found = irc_nick_find_color (nickname);
index_color = (index_color + 1) % 16;
snprintf (color[index_color], sizeof (color[index_color]),
"%s",
color_found);
if (color_found)
free (color_found);
return color[index_color];
}
return IRC_COLOR_CHAT_NICK;
}
/*
* Returns string with color of nick for private.
*/
const char *
irc_nick_color_for_pv (struct t_irc_channel *channel, const char *nickname)
{
if (weechat_config_boolean (irc_config_look_color_pv_nick_like_channel))
{
if (!channel->pv_remote_nick_color)
channel->pv_remote_nick_color = irc_nick_find_color (nickname);
if (channel->pv_remote_nick_color)
return channel->pv_remote_nick_color;
}
return IRC_COLOR_CHAT_NICK_OTHER;
}
/*
* Returns default ban mask for the nick.
*
* Note: result must be freed after use (if not NULL).
*/
char *
irc_nick_default_ban_mask (struct t_irc_nick *nick)
{
const char *ptr_ban_mask;
char *pos_hostname, user[128], ident[128], *res, *temp;
if (!nick)
return NULL;
ptr_ban_mask = weechat_config_string (irc_config_network_ban_mask_default);
pos_hostname = (nick->host) ? strchr (nick->host, '@') : NULL;
if (!nick->host || !pos_hostname || !ptr_ban_mask || !ptr_ban_mask[0])
return NULL;
if (pos_hostname - nick->host > (int)sizeof (user) - 1)
return NULL;
strncpy (user, nick->host, pos_hostname - nick->host);
user[pos_hostname - nick->host] = '\0';
strcpy (ident, (user[0] != '~') ? user : "*");
pos_hostname++;
/* replace nick */
temp = weechat_string_replace (ptr_ban_mask, "$nick", nick->name);
if (!temp)
return NULL;
res = temp;
/* replace user */
temp = weechat_string_replace (res, "$user", user);
free (res);
if (!temp)
return NULL;
res = temp;
/* replace ident */
temp = weechat_string_replace (res, "$ident", ident);
free (res);
if (!temp)
return NULL;
res = temp;
/* replace hostname */
temp = weechat_string_replace (res, "$host", pos_hostname);
free (res);
if (!temp)
return NULL;
res = temp;
return res;
}
/*
* Returns hdata for nick.
*/
struct t_hdata *
irc_nick_hdata_nick_cb (const void *pointer, void *data,
const char *hdata_name)
{
struct t_hdata *hdata;
/* make C compiler happy */
(void) pointer;
(void) data;
hdata = weechat_hdata_new (hdata_name, "prev_nick", "next_nick",
0, 0, NULL, NULL);
if (hdata)
{
WEECHAT_HDATA_VAR(struct t_irc_nick, name, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, host, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, prefixes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, prefix, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, away, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, account, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, realname, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, color, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, prev_nick, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_VAR(struct t_irc_nick, next_nick, POINTER, 0, NULL, hdata_name);
}
return hdata;
}
/*
* Adds a nick in an infolist.
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_nick_add_to_infolist (struct t_infolist *infolist,
struct t_irc_nick *nick)
{
struct t_infolist_item *ptr_item;
if (!infolist || !nick)
return 0;
ptr_item = weechat_infolist_new_item (infolist);
if (!ptr_item)
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "name", nick->name))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "host", nick->host))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefixes", nick->prefixes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefix", nick->prefix))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "away", nick->away))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "account", nick->account))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "realname", nick->realname))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "color", nick->color))
return 0;
return 1;
}
/*
* Prints nick infos in WeeChat log file (usually for crash dump).
*/
void
irc_nick_print_log (struct t_irc_nick *nick)
{
weechat_log_printf ("");
weechat_log_printf (" => nick %s (addr:0x%lx):", nick->name, nick);
weechat_log_printf (" host . . . . . : '%s'", nick->host);
weechat_log_printf (" prefixes . . . : '%s'", nick->prefixes);
weechat_log_printf (" prefix . . . . : '%s'", nick->prefix);
weechat_log_printf (" away . . . . . : %d", nick->away);
weechat_log_printf (" account. . . . : '%s'", nick->account);
weechat_log_printf (" realname . . . : '%s'", nick->realname);
weechat_log_printf (" color. . . . . : '%s'", nick->color);
weechat_log_printf (" prev_nick. . . : 0x%lx", nick->prev_nick);
weechat_log_printf (" next_nick. . . : 0x%lx", nick->next_nick);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4697_1 |
crossvul-cpp_data_good_1769_0 | /* -*- mode: c; c-file-style: "openbsd" -*- */
/*
* Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "lldpd.h"
#include "frame.h"
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
inline static int
lldpd_af_to_lldp_proto(int af)
{
switch (af) {
case LLDPD_AF_IPV4:
return LLDP_MGMT_ADDR_IP4;
case LLDPD_AF_IPV6:
return LLDP_MGMT_ADDR_IP6;
default:
return LLDP_MGMT_ADDR_NONE;
}
}
inline static int
lldpd_af_from_lldp_proto(int proto)
{
switch (proto) {
case LLDP_MGMT_ADDR_IP4:
return LLDPD_AF_IPV4;
case LLDP_MGMT_ADDR_IP6:
return LLDPD_AF_IPV6;
default:
return LLDPD_AF_UNSPEC;
}
}
static int _lldp_send(struct lldpd *global,
struct lldpd_hardware *hardware,
u_int8_t c_id_subtype,
char *c_id,
int c_id_len,
u_int8_t p_id_subtype,
char *p_id,
int p_id_len,
int shutdown)
{
struct lldpd_port *port;
struct lldpd_chassis *chassis;
struct lldpd_frame *frame;
int length;
u_int8_t *packet, *pos, *tlv;
struct lldpd_mgmt *mgmt;
int proto;
u_int8_t mcastaddr[] = LLDP_MULTICAST_ADDR;
#ifdef ENABLE_DOT1
const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1;
struct lldpd_vlan *vlan;
struct lldpd_ppvid *ppvid;
struct lldpd_pi *pi;
#endif
#ifdef ENABLE_DOT3
const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3;
#endif
#ifdef ENABLE_LLDPMED
int i;
const u_int8_t med[] = LLDP_TLV_ORG_MED;
#endif
#ifdef ENABLE_CUSTOM
struct lldpd_custom *custom;
#endif
port = &hardware->h_lport;
chassis = port->p_chassis;
length = hardware->h_mtu;
if ((packet = (u_int8_t*)calloc(1, length)) == NULL)
return ENOMEM;
pos = packet;
/* Ethernet header */
if (!(
/* LLDP multicast address */
POKE_BYTES(mcastaddr, sizeof(mcastaddr)) &&
/* Source MAC address */
POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) &&
/* LLDP frame */
POKE_UINT16(ETHERTYPE_LLDP)))
goto toobig;
/* Chassis ID */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) &&
POKE_UINT8(c_id_subtype) &&
POKE_BYTES(c_id, c_id_len) &&
POKE_END_LLDP_TLV))
goto toobig;
/* Port ID */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) &&
POKE_UINT8(p_id_subtype) &&
POKE_BYTES(p_id, p_id_len) &&
POKE_END_LLDP_TLV))
goto toobig;
/* Time to live */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_TTL) &&
POKE_UINT16(shutdown?0:chassis->c_ttl) &&
POKE_END_LLDP_TLV))
goto toobig;
if (shutdown)
goto end;
/* System name */
if (chassis->c_name && *chassis->c_name != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) &&
POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* System description (skip it if empty) */
if (chassis->c_descr && *chassis->c_descr != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) &&
POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* System capabilities */
if (global->g_config.c_cap_advertise && chassis->c_cap_available) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) &&
POKE_UINT16(chassis->c_cap_available) &&
POKE_UINT16(chassis->c_cap_enabled) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Management addresses */
TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) {
proto = lldpd_af_to_lldp_proto(mgmt->m_family);
assert(proto != LLDP_MGMT_ADDR_NONE);
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) &&
/* Size of the address, including its type */
POKE_UINT8(mgmt->m_addrsize + 1) &&
POKE_UINT8(proto) &&
POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize)))
goto toobig;
/* Interface port type, OID */
if (mgmt->m_iface == 0) {
if (!(
/* We don't know the management interface */
POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) &&
POKE_UINT32(0)))
goto toobig;
} else {
if (!(
/* We have the index of the management interface */
POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) &&
POKE_UINT32(mgmt->m_iface)))
goto toobig;
}
if (!(
/* We don't provide an OID for management */
POKE_UINT8(0) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Port description */
if (port->p_descr && *port->p_descr != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) &&
POKE_BYTES(port->p_descr, strlen(port->p_descr)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#ifdef ENABLE_DOT1
/* Port VLAN ID */
if(port->p_pvid != 0) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PVID) &&
POKE_UINT16(port->p_pvid) &&
POKE_END_LLDP_TLV)) {
goto toobig;
}
}
/* Port and Protocol VLAN IDs */
TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PPVID) &&
POKE_UINT8(ppvid->p_cap_status) &&
POKE_UINT16(ppvid->p_ppvid) &&
POKE_END_LLDP_TLV)) {
goto toobig;
}
}
/* VLANs */
TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) &&
POKE_UINT16(vlan->v_vid) &&
POKE_UINT8(strlen(vlan->v_name)) &&
POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Protocol Identities */
TAILQ_FOREACH(pi, &port->p_pids, p_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PI) &&
POKE_UINT8(pi->p_pi_len) &&
POKE_BYTES(pi->p_pi, pi->p_pi_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#endif
#ifdef ENABLE_DOT3
/* Aggregation status */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_LA) &&
/* Bit 0 = capability ; Bit 1 = status */
POKE_UINT8((port->p_aggregid) ? 3:1) &&
POKE_UINT32(port->p_aggregid) &&
POKE_END_LLDP_TLV))
goto toobig;
/* MAC/PHY */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_MAC) &&
POKE_UINT8(port->p_macphy.autoneg_support |
(port->p_macphy.autoneg_enabled << 1)) &&
POKE_UINT16(port->p_macphy.autoneg_advertised) &&
POKE_UINT16(port->p_macphy.mau_type) &&
POKE_END_LLDP_TLV))
goto toobig;
/* MFS */
if (port->p_mfs) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_MFS) &&
POKE_UINT16(port->p_mfs) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Power */
if (port->p_power.devicetype) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_POWER) &&
POKE_UINT8((
(((2 - port->p_power.devicetype) %(1<< 1))<<0) |
(( port->p_power.supported %(1<< 1))<<1) |
(( port->p_power.enabled %(1<< 1))<<2) |
(( port->p_power.paircontrol %(1<< 1))<<3))) &&
POKE_UINT8(port->p_power.pairs) &&
POKE_UINT8(port->p_power.class)))
goto toobig;
/* 802.3at */
if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) {
if (!(
POKE_UINT8((
(((port->p_power.powertype ==
LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) |
(((port->p_power.devicetype ==
LLDP_DOT3_POWER_PSE)?0:1) << 6) |
((port->p_power.source %(1<< 2))<<4) |
((port->p_power.priority %(1<< 2))<<0))) &&
POKE_UINT16(port->p_power.requested) &&
POKE_UINT16(port->p_power.allocated)))
goto toobig;
}
if (!(POKE_END_LLDP_TLV))
goto toobig;
}
#endif
#ifdef ENABLE_LLDPMED
if (port->p_med_cap_enabled) {
/* LLDP-MED cap */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_CAP) &&
POKE_UINT16(chassis->c_med_cap_available) &&
POKE_UINT8(chassis->c_med_type) &&
POKE_END_LLDP_TLV))
goto toobig;
/* LLDP-MED inventory */
#define LLDP_INVENTORY(value, subtype) \
if (value) { \
if (!( \
POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \
POKE_BYTES(med, sizeof(med)) && \
POKE_UINT8(subtype) && \
POKE_BYTES(value, \
(strlen(value)>32)?32:strlen(value)) && \
POKE_END_LLDP_TLV)) \
goto toobig; \
}
if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) {
LLDP_INVENTORY(chassis->c_med_hw,
LLDP_TLV_MED_IV_HW);
LLDP_INVENTORY(chassis->c_med_fw,
LLDP_TLV_MED_IV_FW);
LLDP_INVENTORY(chassis->c_med_sw,
LLDP_TLV_MED_IV_SW);
LLDP_INVENTORY(chassis->c_med_sn,
LLDP_TLV_MED_IV_SN);
LLDP_INVENTORY(chassis->c_med_manuf,
LLDP_TLV_MED_IV_MANUF);
LLDP_INVENTORY(chassis->c_med_model,
LLDP_TLV_MED_IV_MODEL);
LLDP_INVENTORY(chassis->c_med_asset,
LLDP_TLV_MED_IV_ASSET);
}
/* LLDP-MED location */
for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) {
if (port->p_med_location[i].format == i + 1) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_LOCATION) &&
POKE_UINT8(port->p_med_location[i].format) &&
POKE_BYTES(port->p_med_location[i].data,
port->p_med_location[i].data_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
/* LLDP-MED network policy */
for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) {
if (port->p_med_policy[i].type == i + 1) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_POLICY) &&
POKE_UINT32((
((port->p_med_policy[i].type %(1<< 8))<<24) |
((port->p_med_policy[i].unknown %(1<< 1))<<23) |
((port->p_med_policy[i].tagged %(1<< 1))<<22) |
/*((0 %(1<< 1))<<21) |*/
((port->p_med_policy[i].vid %(1<<12))<< 9) |
((port->p_med_policy[i].priority %(1<< 3))<< 6) |
((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
/* LLDP-MED POE-MDI */
if ((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) ||
(port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) {
int devicetype = 0, source = 0;
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_MDI)))
goto toobig;
switch (port->p_med_power.devicetype) {
case LLDP_MED_POW_TYPE_PSE:
devicetype = 0;
switch (port->p_med_power.source) {
case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break;
case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break;
case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break;
default: source = 0; break;
}
break;
case LLDP_MED_POW_TYPE_PD:
devicetype = 1;
switch (port->p_med_power.source) {
case LLDP_MED_POW_SOURCE_PSE: source = 1; break;
case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break;
case LLDP_MED_POW_SOURCE_BOTH: source = 3; break;
default: source = 0; break;
}
break;
}
if (!(
POKE_UINT8((
((devicetype %(1<< 2))<<6) |
((source %(1<< 2))<<4) |
((port->p_med_power.priority %(1<< 4))<<0) )) &&
POKE_UINT16(port->p_med_power.val) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
#endif
#ifdef ENABLE_CUSTOM
TAILQ_FOREACH(custom, &port->p_custom_list, next) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(custom->oui, sizeof(custom->oui)) &&
POKE_UINT8(custom->subtype) &&
POKE_BYTES(custom->oui_info, custom->oui_info_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#endif
end:
/* END */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_END) &&
POKE_END_LLDP_TLV))
goto toobig;
if (interfaces_send_helper(global, hardware,
(char *)packet, pos - packet) == -1) {
log_warn("lldp", "unable to send packet on real device for %s",
hardware->h_ifname);
free(packet);
return ENETDOWN;
}
hardware->h_tx_cnt++;
/* We assume that LLDP frame is the reference */
if (!shutdown && (frame = (struct lldpd_frame*)malloc(
sizeof(int) + pos - packet)) != NULL) {
frame->size = pos - packet;
memcpy(&frame->frame, packet, frame->size);
if ((hardware->h_lport.p_lastframe == NULL) ||
(hardware->h_lport.p_lastframe->size != frame->size) ||
(memcmp(hardware->h_lport.p_lastframe->frame, frame->frame,
frame->size) != 0)) {
free(hardware->h_lport.p_lastframe);
hardware->h_lport.p_lastframe = frame;
hardware->h_lport.p_lastchange = time(NULL);
} else free(frame);
}
free(packet);
return 0;
toobig:
free(packet);
return E2BIG;
}
/* Send a shutdown LLDPDU. */
int
lldp_send_shutdown(struct lldpd *global,
struct lldpd_hardware *hardware)
{
if (hardware->h_lchassis_previous_id == NULL ||
hardware->h_lport_previous_id == NULL)
return 0;
return _lldp_send(global, hardware,
hardware->h_lchassis_previous_id_subtype,
hardware->h_lchassis_previous_id,
hardware->h_lchassis_previous_id_len,
hardware->h_lport_previous_id_subtype,
hardware->h_lport_previous_id,
hardware->h_lport_previous_id_len,
1);
}
int
lldp_send(struct lldpd *global,
struct lldpd_hardware *hardware)
{
struct lldpd_port *port = &hardware->h_lport;
struct lldpd_chassis *chassis = port->p_chassis;
int ret;
/* Check if we have a change. */
if (hardware->h_lchassis_previous_id != NULL &&
hardware->h_lport_previous_id != NULL &&
(hardware->h_lchassis_previous_id_subtype != chassis->c_id_subtype ||
hardware->h_lchassis_previous_id_len != chassis->c_id_len ||
hardware->h_lport_previous_id_subtype != port->p_id_subtype ||
hardware->h_lport_previous_id_len != port->p_id_len ||
memcmp(hardware->h_lchassis_previous_id,
chassis->c_id, chassis->c_id_len) ||
memcmp(hardware->h_lport_previous_id,
port->p_id, port->p_id_len))) {
log_info("lldp", "MSAP has changed for port %s, sending a shutdown LLDPDU",
hardware->h_ifname);
if ((ret = lldp_send_shutdown(global, hardware)) != 0)
return ret;
}
log_debug("lldp", "send LLDP PDU to %s",
hardware->h_ifname);
if ((ret = _lldp_send(global, hardware,
chassis->c_id_subtype,
chassis->c_id,
chassis->c_id_len,
port->p_id_subtype,
port->p_id,
port->p_id_len,
0)) != 0)
return ret;
/* Record current chassis and port ID */
free(hardware->h_lchassis_previous_id);
hardware->h_lchassis_previous_id_subtype = chassis->c_id_subtype;
hardware->h_lchassis_previous_id_len = chassis->c_id_len;
if ((hardware->h_lchassis_previous_id = malloc(chassis->c_id_len)) != NULL)
memcpy(hardware->h_lchassis_previous_id, chassis->c_id,
chassis->c_id_len);
free(hardware->h_lport_previous_id);
hardware->h_lport_previous_id_subtype = port->p_id_subtype;
hardware->h_lport_previous_id_len = port->p_id_len;
if ((hardware->h_lport_previous_id = malloc(port->p_id_len)) != NULL)
memcpy(hardware->h_lport_previous_id, port->p_id,
port->p_id_len);
return 0;
}
#define CHECK_TLV_SIZE(x, name) \
do { if (tlv_size < (x)) { \
log_warnx("lldp", name " TLV too short received on %s", \
hardware->h_ifname); \
goto malformed; \
} } while (0)
int
lldp_decode(struct lldpd *cfg, char *frame, int s,
struct lldpd_hardware *hardware,
struct lldpd_chassis **newchassis, struct lldpd_port **newport)
{
struct lldpd_chassis *chassis;
struct lldpd_port *port;
const char lldpaddr[] = LLDP_MULTICAST_ADDR;
const char dot1[] = LLDP_TLV_ORG_DOT1;
const char dot3[] = LLDP_TLV_ORG_DOT3;
const char med[] = LLDP_TLV_ORG_MED;
const char dcbx[] = LLDP_TLV_ORG_DCBX;
unsigned char orgid[3];
int length, gotend = 0, ttl_received = 0;
int tlv_size, tlv_type, tlv_subtype;
u_int8_t *pos, *tlv;
char *b;
#ifdef ENABLE_DOT1
struct lldpd_vlan *vlan = NULL;
int vlan_len;
struct lldpd_ppvid *ppvid;
struct lldpd_pi *pi = NULL;
#endif
struct lldpd_mgmt *mgmt;
int af;
u_int8_t addr_str_length, addr_str_buffer[32];
u_int8_t addr_family, addr_length, *addr_ptr, iface_subtype;
u_int32_t iface_number, iface;
#ifdef ENABLE_CUSTOM
struct lldpd_custom *custom = NULL;
#endif
log_debug("lldp", "receive LLDP PDU on %s",
hardware->h_ifname);
if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) {
log_warn("lldp", "failed to allocate remote chassis");
return -1;
}
TAILQ_INIT(&chassis->c_mgmt);
if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) {
log_warn("lldp", "failed to allocate remote port");
free(chassis);
return -1;
}
#ifdef ENABLE_DOT1
TAILQ_INIT(&port->p_vlans);
TAILQ_INIT(&port->p_ppvids);
TAILQ_INIT(&port->p_pids);
#endif
#ifdef ENABLE_CUSTOM
TAILQ_INIT(&port->p_custom_list);
#endif
length = s;
pos = (u_int8_t*)frame;
if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t)) {
log_warnx("lldp", "too short frame received on %s", hardware->h_ifname);
goto malformed;
}
if (PEEK_CMP(lldpaddr, ETHER_ADDR_LEN) != 0) {
log_info("lldp", "frame not targeted at LLDP multicast address received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_DISCARD(ETHER_ADDR_LEN); /* Skip source address */
if (PEEK_UINT16 != ETHERTYPE_LLDP) {
log_info("lldp", "non LLDP frame received on %s",
hardware->h_ifname);
goto malformed;
}
while (length && (!gotend)) {
if (length < 2) {
log_warnx("lldp", "tlv header too short received on %s",
hardware->h_ifname);
goto malformed;
}
tlv_size = PEEK_UINT16;
tlv_type = tlv_size >> 9;
tlv_size = tlv_size & 0x1ff;
(void)PEEK_SAVE(tlv);
if (length < tlv_size) {
log_warnx("lldp", "frame too short for tlv received on %s",
hardware->h_ifname);
goto malformed;
}
switch (tlv_type) {
case LLDP_TLV_END:
if (tlv_size != 0) {
log_warnx("lldp", "lldp end received with size not null on %s",
hardware->h_ifname);
goto malformed;
}
if (length)
log_debug("lldp", "extra data after lldp end on %s",
hardware->h_ifname);
gotend = 1;
break;
case LLDP_TLV_CHASSIS_ID:
case LLDP_TLV_PORT_ID:
CHECK_TLV_SIZE(2, "Port Id");
tlv_subtype = PEEK_UINT8;
if ((tlv_subtype == 0) || (tlv_subtype > 7)) {
log_warnx("lldp", "unknown subtype for tlv id received on %s",
hardware->h_ifname);
goto malformed;
}
if ((b = (char *)calloc(1, tlv_size - 1)) == NULL) {
log_warn("lldp", "unable to allocate memory for id tlv "
"received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(b, tlv_size - 1);
if (tlv_type == LLDP_TLV_PORT_ID) {
port->p_id_subtype = tlv_subtype;
port->p_id = b;
port->p_id_len = tlv_size - 1;
} else {
chassis->c_id_subtype = tlv_subtype;
chassis->c_id = b;
chassis->c_id_len = tlv_size - 1;
}
break;
case LLDP_TLV_TTL:
CHECK_TLV_SIZE(2, "TTL");
chassis->c_ttl = PEEK_UINT16;
ttl_received = 1;
break;
case LLDP_TLV_PORT_DESCR:
case LLDP_TLV_SYSTEM_NAME:
case LLDP_TLV_SYSTEM_DESCR:
if (tlv_size < 1) {
log_debug("lldp", "empty tlv received on %s",
hardware->h_ifname);
break;
}
if ((b = (char *)calloc(1, tlv_size + 1)) == NULL) {
log_warn("lldp", "unable to allocate memory for string tlv "
"received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(b, tlv_size);
if (tlv_type == LLDP_TLV_PORT_DESCR)
port->p_descr = b;
else if (tlv_type == LLDP_TLV_SYSTEM_NAME)
chassis->c_name = b;
else chassis->c_descr = b;
break;
case LLDP_TLV_SYSTEM_CAP:
CHECK_TLV_SIZE(4, "System capabilities");
chassis->c_cap_available = PEEK_UINT16;
chassis->c_cap_enabled = PEEK_UINT16;
break;
case LLDP_TLV_MGMT_ADDR:
CHECK_TLV_SIZE(1, "Management address");
addr_str_length = PEEK_UINT8;
if (addr_str_length > sizeof(addr_str_buffer)) {
log_warnx("lldp", "too large management address on %s",
hardware->h_ifname);
goto malformed;
}
CHECK_TLV_SIZE(1 + addr_str_length, "Management address");
PEEK_BYTES(addr_str_buffer, addr_str_length);
addr_length = addr_str_length - 1;
addr_family = addr_str_buffer[0];
addr_ptr = &addr_str_buffer[1];
CHECK_TLV_SIZE(1 + addr_str_length + 5, "Management address");
iface_subtype = PEEK_UINT8;
iface_number = PEEK_UINT32;
af = lldpd_af_from_lldp_proto(addr_family);
if (af == LLDPD_AF_UNSPEC)
break;
if (iface_subtype == LLDP_MGMT_IFACE_IFINDEX)
iface = iface_number;
else
iface = 0;
mgmt = lldpd_alloc_mgmt(af, addr_ptr, addr_length, iface);
if (mgmt == NULL) {
assert(errno == ENOMEM);
log_warn("lldp", "unable to allocate memory "
"for management address");
goto malformed;
}
TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries);
break;
case LLDP_TLV_ORG:
CHECK_TLV_SIZE(1 + (int)sizeof(orgid), "Organisational");
PEEK_BYTES(orgid, sizeof(orgid));
tlv_subtype = PEEK_UINT8;
if (memcmp(dot1, orgid, sizeof(orgid)) == 0) {
#ifndef ENABLE_DOT1
hardware->h_rx_unrecognized_cnt++;
#else
/* Dot1 */
switch (tlv_subtype) {
case LLDP_TLV_DOT1_VLANNAME:
CHECK_TLV_SIZE(7, "VLAN");
if ((vlan = (struct lldpd_vlan *)calloc(1,
sizeof(struct lldpd_vlan))) == NULL) {
log_warn("lldp", "unable to alloc vlan "
"structure for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
vlan->v_vid = PEEK_UINT16;
vlan_len = PEEK_UINT8;
CHECK_TLV_SIZE(7 + vlan_len, "VLAN");
if ((vlan->v_name =
(char *)calloc(1, vlan_len + 1)) == NULL) {
log_warn("lldp", "unable to alloc vlan name for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(vlan->v_name, vlan_len);
TAILQ_INSERT_TAIL(&port->p_vlans,
vlan, v_entries);
vlan = NULL;
break;
case LLDP_TLV_DOT1_PVID:
CHECK_TLV_SIZE(6, "PVID");
port->p_pvid = PEEK_UINT16;
break;
case LLDP_TLV_DOT1_PPVID:
CHECK_TLV_SIZE(7, "PPVID");
/* validation needed */
/* PPVID has to be unique if more than
one PPVID TLVs are received -
discard if duplicate */
/* if support bit is not set and
enabled bit is set - PPVID TLV is
considered error and discarded */
/* if PPVID > 4096 - bad and discard */
if ((ppvid = (struct lldpd_ppvid *)calloc(1,
sizeof(struct lldpd_ppvid))) == NULL) {
log_warn("lldp", "unable to alloc ppvid "
"structure for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
ppvid->p_cap_status = PEEK_UINT8;
ppvid->p_ppvid = PEEK_UINT16;
TAILQ_INSERT_TAIL(&port->p_ppvids,
ppvid, p_entries);
break;
case LLDP_TLV_DOT1_PI:
/* validation needed */
/* PI has to be unique if more than
one PI TLVs are received - discard
if duplicate ?? */
CHECK_TLV_SIZE(5, "PI");
if ((pi = (struct lldpd_pi *)calloc(1,
sizeof(struct lldpd_pi))) == NULL) {
log_warn("lldp", "unable to alloc PI "
"structure for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
pi->p_pi_len = PEEK_UINT8;
CHECK_TLV_SIZE(5 + pi->p_pi_len, "PI");
if ((pi->p_pi =
(char *)calloc(1, pi->p_pi_len)) == NULL) {
log_warn("lldp", "unable to alloc pid name for "
"tlv received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(pi->p_pi, pi->p_pi_len);
TAILQ_INSERT_TAIL(&port->p_pids,
pi, p_entries);
pi = NULL;
break;
default:
/* Unknown Dot1 TLV, ignore it */
hardware->h_rx_unrecognized_cnt++;
}
#endif
} else if (memcmp(dot3, orgid, sizeof(orgid)) == 0) {
#ifndef ENABLE_DOT3
hardware->h_rx_unrecognized_cnt++;
#else
/* Dot3 */
switch (tlv_subtype) {
case LLDP_TLV_DOT3_MAC:
CHECK_TLV_SIZE(9, "MAC/PHY");
port->p_macphy.autoneg_support = PEEK_UINT8;
port->p_macphy.autoneg_enabled =
(port->p_macphy.autoneg_support & 0x2) >> 1;
port->p_macphy.autoneg_support =
port->p_macphy.autoneg_support & 0x1;
port->p_macphy.autoneg_advertised =
PEEK_UINT16;
port->p_macphy.mau_type = PEEK_UINT16;
break;
case LLDP_TLV_DOT3_LA:
CHECK_TLV_SIZE(9, "Link aggregation");
PEEK_DISCARD_UINT8;
port->p_aggregid = PEEK_UINT32;
break;
case LLDP_TLV_DOT3_MFS:
CHECK_TLV_SIZE(6, "MFS");
port->p_mfs = PEEK_UINT16;
break;
case LLDP_TLV_DOT3_POWER:
CHECK_TLV_SIZE(7, "Power");
port->p_power.devicetype = PEEK_UINT8;
port->p_power.supported =
(port->p_power.devicetype & 0x2) >> 1;
port->p_power.enabled =
(port->p_power.devicetype & 0x4) >> 2;
port->p_power.paircontrol =
(port->p_power.devicetype & 0x8) >> 3;
port->p_power.devicetype =
(port->p_power.devicetype & 0x1)?
LLDP_DOT3_POWER_PSE:LLDP_DOT3_POWER_PD;
port->p_power.pairs = PEEK_UINT8;
port->p_power.class = PEEK_UINT8;
/* 802.3at? */
if (tlv_size >= 12) {
port->p_power.powertype = PEEK_UINT8;
port->p_power.source =
(port->p_power.powertype & (1<<5 | 1<<4)) >> 4;
port->p_power.priority =
(port->p_power.powertype & (1<<1 | 1<<0));
port->p_power.powertype =
(port->p_power.powertype & (1<<7))?
LLDP_DOT3_POWER_8023AT_TYPE1:
LLDP_DOT3_POWER_8023AT_TYPE2;
port->p_power.requested = PEEK_UINT16;
port->p_power.allocated = PEEK_UINT16;
} else
port->p_power.powertype =
LLDP_DOT3_POWER_8023AT_OFF;
break;
default:
/* Unknown Dot3 TLV, ignore it */
hardware->h_rx_unrecognized_cnt++;
}
#endif
} else if (memcmp(med, orgid, sizeof(orgid)) == 0) {
/* LLDP-MED */
#ifndef ENABLE_LLDPMED
hardware->h_rx_unrecognized_cnt++;
#else
u_int32_t policy;
unsigned loctype;
unsigned power;
switch (tlv_subtype) {
case LLDP_TLV_MED_CAP:
CHECK_TLV_SIZE(7, "LLDP-MED capabilities");
chassis->c_med_cap_available = PEEK_UINT16;
chassis->c_med_type = PEEK_UINT8;
port->p_med_cap_enabled |=
LLDP_MED_CAP_CAP;
break;
case LLDP_TLV_MED_POLICY:
CHECK_TLV_SIZE(8, "LLDP-MED policy");
policy = PEEK_UINT32;
if (((policy >> 24) < 1) ||
((policy >> 24) > LLDP_MED_APPTYPE_LAST)) {
log_info("lldp", "unknown policy field %d "
"received on %s",
policy,
hardware->h_ifname);
break;
}
port->p_med_policy[(policy >> 24) - 1].type =
(policy >> 24);
port->p_med_policy[(policy >> 24) - 1].unknown =
((policy & 0x800000) != 0);
port->p_med_policy[(policy >> 24) - 1].tagged =
((policy & 0x400000) != 0);
port->p_med_policy[(policy >> 24) - 1].vid =
(policy & 0x001FFE00) >> 9;
port->p_med_policy[(policy >> 24) - 1].priority =
(policy & 0x1C0) >> 6;
port->p_med_policy[(policy >> 24) - 1].dscp =
policy & 0x3F;
port->p_med_cap_enabled |=
LLDP_MED_CAP_POLICY;
break;
case LLDP_TLV_MED_LOCATION:
CHECK_TLV_SIZE(5, "LLDP-MED Location");
loctype = PEEK_UINT8;
if ((loctype < 1) ||
(loctype > LLDP_MED_LOCFORMAT_LAST)) {
log_info("lldp", "unknown location type "
"received on %s",
hardware->h_ifname);
break;
}
if ((port->p_med_location[loctype - 1].data =
(char*)malloc(tlv_size - 5)) == NULL) {
log_warn("lldp", "unable to allocate memory "
"for LLDP-MED location for "
"frame received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(port->p_med_location[loctype - 1].data,
tlv_size - 5);
port->p_med_location[loctype - 1].data_len =
tlv_size - 5;
port->p_med_location[loctype - 1].format = loctype;
port->p_med_cap_enabled |=
LLDP_MED_CAP_LOCATION;
break;
case LLDP_TLV_MED_MDI:
CHECK_TLV_SIZE(7, "LLDP-MED PoE-MDI");
power = PEEK_UINT8;
switch (power & 0xC0) {
case 0x0:
port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PSE;
port->p_med_cap_enabled |=
LLDP_MED_CAP_MDI_PSE;
switch (power & 0x30) {
case 0x0:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_UNKNOWN;
break;
case 0x10:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_PRIMARY;
break;
case 0x20:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_BACKUP;
break;
default:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_RESERVED;
}
break;
case 0x40:
port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PD;
port->p_med_cap_enabled |=
LLDP_MED_CAP_MDI_PD;
switch (power & 0x30) {
case 0x0:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_UNKNOWN;
break;
case 0x10:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_PSE;
break;
case 0x20:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_LOCAL;
break;
default:
port->p_med_power.source =
LLDP_MED_POW_SOURCE_BOTH;
}
break;
default:
port->p_med_power.devicetype =
LLDP_MED_POW_TYPE_RESERVED;
}
if ((power & 0x0F) > LLDP_MED_POW_PRIO_LOW)
port->p_med_power.priority =
LLDP_MED_POW_PRIO_UNKNOWN;
else
port->p_med_power.priority =
power & 0x0F;
port->p_med_power.val = PEEK_UINT16;
break;
case LLDP_TLV_MED_IV_HW:
case LLDP_TLV_MED_IV_SW:
case LLDP_TLV_MED_IV_FW:
case LLDP_TLV_MED_IV_SN:
case LLDP_TLV_MED_IV_MANUF:
case LLDP_TLV_MED_IV_MODEL:
case LLDP_TLV_MED_IV_ASSET:
if (tlv_size <= 4)
b = NULL;
else {
if ((b = (char*)malloc(tlv_size - 3)) ==
NULL) {
log_warn("lldp", "unable to allocate "
"memory for LLDP-MED "
"inventory for frame "
"received on %s",
hardware->h_ifname);
goto malformed;
}
PEEK_BYTES(b, tlv_size - 4);
b[tlv_size - 4] = '\0';
}
switch (tlv_subtype) {
case LLDP_TLV_MED_IV_HW:
chassis->c_med_hw = b;
break;
case LLDP_TLV_MED_IV_FW:
chassis->c_med_fw = b;
break;
case LLDP_TLV_MED_IV_SW:
chassis->c_med_sw = b;
break;
case LLDP_TLV_MED_IV_SN:
chassis->c_med_sn = b;
break;
case LLDP_TLV_MED_IV_MANUF:
chassis->c_med_manuf = b;
break;
case LLDP_TLV_MED_IV_MODEL:
chassis->c_med_model = b;
break;
case LLDP_TLV_MED_IV_ASSET:
chassis->c_med_asset = b;
break;
}
port->p_med_cap_enabled |=
LLDP_MED_CAP_IV;
break;
default:
/* Unknown LLDP MED, ignore it */
hardware->h_rx_unrecognized_cnt++;
}
#endif /* ENABLE_LLDPMED */
} else if (memcmp(dcbx, orgid, sizeof(orgid)) == 0) {
log_debug("lldp", "unsupported DCBX tlv received on %s - ignore",
hardware->h_ifname);
hardware->h_rx_unrecognized_cnt++;
} else {
log_debug("lldp", "unknown org tlv [%02x:%02x:%02x] received on %s",
orgid[0], orgid[1], orgid[2],
hardware->h_ifname);
hardware->h_rx_unrecognized_cnt++;
#ifdef ENABLE_CUSTOM
custom = (struct lldpd_custom*)calloc(1, sizeof(struct lldpd_custom));
if (!custom) {
log_warn("lldp",
"unable to allocate memory for custom TLV");
goto malformed;
}
custom->oui_info_len = tlv_size > 4 ? tlv_size - 4 : 0;
memcpy(custom->oui, orgid, sizeof(custom->oui));
custom->subtype = tlv_subtype;
if (custom->oui_info_len > 0) {
custom->oui_info = malloc(custom->oui_info_len);
if (!custom->oui_info) {
log_warn("lldp",
"unable to allocate memory for custom TLV data");
goto malformed;
}
PEEK_BYTES(custom->oui_info, custom->oui_info_len);
}
TAILQ_INSERT_TAIL(&port->p_custom_list, custom, next);
custom = NULL;
#endif
}
break;
default:
log_warnx("lldp", "unknown tlv (%d) received on %s",
tlv_type, hardware->h_ifname);
goto malformed;
}
if (pos > tlv + tlv_size) {
log_warnx("lldp", "BUG: already past TLV!");
goto malformed;
}
PEEK_DISCARD(tlv + tlv_size - pos);
}
/* Some random check */
if ((chassis->c_id == NULL) ||
(port->p_id == NULL) ||
(!ttl_received) ||
(gotend == 0)) {
log_warnx("lldp", "some mandatory tlv are missing for frame received on %s",
hardware->h_ifname);
goto malformed;
}
*newchassis = chassis;
*newport = port;
return 1;
malformed:
#ifdef ENABLE_CUSTOM
free(custom);
#endif
#ifdef ENABLE_DOT1
free(vlan);
free(pi);
#endif
lldpd_chassis_cleanup(chassis, 1);
lldpd_port_cleanup(port, 1);
free(port);
return -1;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_1769_0 |
crossvul-cpp_data_bad_3995_0 | // SPDX-License-Identifier: ISC
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
u32 ring_base)
{
int size;
int i;
spin_lock_init(&q->lock);
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
/* clear descriptors */
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
writel(q->desc_dma, &q->regs->desc_base);
writel(0, &q->regs->cpu_idx);
writel(0, &q->regs->dma_idx);
writel(q->ndesc, &q->regs->ring_size);
return 0;
}
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi)
{
struct mt76_desc *desc;
u32 ctrl;
int i, idx = -1;
if (txwi) {
q->entry[q->head].txwi = DMA_DUMMY_DATA;
q->entry[q->head].skip_buf0 = true;
}
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
if (i < nbufs - 1) {
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
}
if (i == nbufs - 1)
ctrl |= MT_DMA_CTL_LAST_SEC0;
else if (i == nbufs - 2)
ctrl |= MT_DMA_CTL_LAST_SEC1;
idx = q->head;
q->head = (q->head + 1) % q->ndesc;
desc = &q->desc[idx];
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->info, cpu_to_le32(info));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
q->queued++;
}
q->entry[idx].txwi = txwi;
q->entry[idx].skb = skb;
return idx;
}
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *prev_e)
{
struct mt76_queue_entry *e = &q->entry[idx];
__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
u32 ctrl = le32_to_cpu(__ctrl);
if (!e->skip_buf0) {
__le32 addr = READ_ONCE(q->desc[idx].buf0);
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
DMA_TO_DEVICE);
}
if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
__le32 addr = READ_ONCE(q->desc[idx].buf1);
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
DMA_TO_DEVICE);
}
if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
if (e->skb == DMA_DUMMY_DATA)
e->skb = NULL;
*prev_e = *e;
memset(e, 0, sizeof(*e));
}
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
writel(q->desc_dma, &q->regs->desc_base);
writel(q->ndesc, &q->regs->ring_size);
q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
unsigned int n_swq_queued[4] = {};
unsigned int n_queued = 0;
bool wake = false;
int i, last;
if (!q)
return;
if (flush)
last = -1;
else
last = readl(&q->regs->dma_idx);
while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
n_queued++;
if (entry.skb)
dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
}
spin_lock_bh(&q->lock);
q->queued -= n_queued;
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
if (flush)
mt76_dma_sync_idx(dev, q);
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
spin_unlock_bh(&q->lock);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
}
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
int *len, u32 *info, bool *more)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
dma_addr_t buf_addr;
void *buf = e->buf;
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
if (len) {
u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
}
if (info)
*info = le32_to_cpu(desc->info);
dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
e->buf = NULL;
return buf;
}
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more)
{
int idx = q->tail;
*more = false;
if (!q->queued)
return NULL;
if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
writel(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_tx_info tx_info = {
.skb = skb,
};
int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
struct sk_buff *iter;
dma_addr_t addr;
u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM;
}
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
goto free;
tx_info.buf[n].addr = t->dma_addr;
tx_info.buf[n++].len = dev->drv->txwi_size;
tx_info.buf[n].addr = addr;
tx_info.buf[n++].len = len;
skb_walk_frags(skb, iter) {
if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
goto unmap;
tx_info.buf[n].addr = addr;
tx_info.buf[n++].len = iter->len;
}
tx_info.nbuf = n;
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
goto unmap;
if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
ret = -ENOMEM;
goto unmap;
}
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
tx_info.info, tx_info.skb, t);
unmap:
for (n--; n > 0; n--)
dma_unmap_single(dev->dev, tx_info.buf[n].addr,
tx_info.buf[n].len, DMA_TO_DEVICE);
free:
e.skb = tx_info.skb;
e.txwi = t;
dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
dma_addr_t addr;
void *buf;
int frames = 0;
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf;
buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
if (!buf)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr))) {
skb_free_frag(buf);
break;
}
qbuf.addr = addr + offset;
qbuf.len = len - offset;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
if (frames)
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
return frames;
}
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
struct page *page;
void *buf;
bool more;
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
if (!buf)
break;
skb_free_frag(buf);
} while (1);
spin_unlock_bh(&q->lock);
if (!q->rx_page.va)
return;
page = virt_to_page(q->rx_page.va);
__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
memset(&q->rx_page, 0, sizeof(q->rx_page));
}
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
struct mt76_queue *q = &dev->q_rx[qid];
int i;
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
mt76_dma_rx_fill(dev, q);
if (!q->rx_head)
return;
dev_kfree_skb(q->rx_head);
q->rx_head = NULL;
}
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
struct page *page = virt_to_head_page(data);
int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
offset += q->buf_offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
q->buf_size);
if (more)
return;
q->rx_head = NULL;
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
int len, data_len, done = 0;
struct sk_buff *skb;
unsigned char *data;
bool more;
while (done < budget) {
u32 info;
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
if (!data)
break;
if (q->rx_head)
data_len = q->buf_size;
else
data_len = SKB_WITH_OVERHEAD(q->buf_size);
if (data_len < len + q->buf_offset) {
dev_kfree_skb(q->rx_head);
q->rx_head = NULL;
skb_free_frag(data);
continue;
}
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more);
continue;
}
skb = build_skb(data, q->buf_size);
if (!skb) {
skb_free_frag(data);
continue;
}
skb_reserve(skb, q->buf_offset);
if (q == &dev->q_rx[MT_RXQ_MCU]) {
u32 *rxfce = (u32 *)skb->cb;
*rxfce = info;
}
__skb_put(skb, len);
done++;
if (more) {
q->rx_head = skb;
continue;
}
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
mt76_dma_rx_fill(dev, q);
return done;
}
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
struct mt76_dev *dev;
int qid, done = 0, cur;
dev = container_of(napi->dev, struct mt76_dev, napi_dev);
qid = napi - dev->napi;
rcu_read_lock();
do {
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
mt76_rx_poll_complete(dev, qid, napi);
done += cur;
} while (cur && done < budget);
rcu_read_unlock();
if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
return done;
}
static int
mt76_dma_init(struct mt76_dev *dev)
{
int i;
init_dummy_netdev(&dev->napi_dev);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
return 0;
}
static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
};
void mt76_dma_attach(struct mt76_dev *dev)
{
dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3995_0 |
crossvul-cpp_data_bad_998_1 | /*
* Marvell Wireless LAN device driver: AP specific command handling
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
#include "11ac.h"
#include "11n.h"
/* This function parses security related parameters from cfg80211_ap_settings
* and sets into FW understandable bss_config structure.
*/
int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params) {
int i;
struct mwifiex_wep_key wep_key;
if (!params->privacy) {
bss_config->protocol = PROTOCOL_NO_SECURITY;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
priv->sec_info.wep_enabled = 0;
priv->sec_info.wpa_enabled = 0;
priv->sec_info.wpa2_enabled = 0;
return 0;
}
switch (params->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
bss_config->auth_mode = WLAN_AUTH_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
bss_config->auth_mode = WLAN_AUTH_LEAP;
break;
default:
bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
break;
}
bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
for (i = 0; i < params->crypto.n_akm_suites; i++) {
switch (params->crypto.akm_suites[i]) {
case WLAN_AKM_SUITE_8021X:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
break;
case WLAN_AKM_SUITE_PSK:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
break;
default:
break;
}
}
for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
switch (params->crypto.ciphers_pairwise[i]) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
break;
case WLAN_CIPHER_SUITE_TKIP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_TKIP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_AES_CCMP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
default:
break;
}
}
switch (params->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (priv->sec_info.wep_enabled) {
bss_config->protocol = PROTOCOL_STATIC_WEP;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
for (i = 0; i < NUM_WEP_KEYS; i++) {
wep_key = priv->wep_key[i];
bss_config->wep_cfg[i].key_index = i;
if (priv->wep_key_curr_index == i)
bss_config->wep_cfg[i].is_default = 1;
else
bss_config->wep_cfg[i].is_default = 0;
bss_config->wep_cfg[i].length =
wep_key.key_length;
memcpy(&bss_config->wep_cfg[i].key,
&wep_key.key_material,
wep_key.key_length);
}
}
break;
case WLAN_CIPHER_SUITE_TKIP:
bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
break;
default:
break;
}
return 0;
}
/* This function updates 11n related parameters from IE and sets them into
* bss_config structure.
*/
void
mwifiex_set_ht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *ht_ie;
if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
return;
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap));
bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_vht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vht_ie;
vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (vht_ie) {
memcpy(&bss_cfg->vht_cap, vht_ie + 2,
sizeof(struct ieee80211_vht_cap));
priv->ap_11ac_enabled = 1;
} else {
priv->ap_11ac_enabled = 0;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_tpc_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *tpc_ie;
tpc_ie = cfg80211_find_ie(WLAN_EID_TPC_REQUEST, params->beacon.tail,
params->beacon.tail_len);
if (tpc_ie)
bss_cfg->power_constraint = *(tpc_ie + 2);
else
bss_cfg->power_constraint = 0;
}
/* Enable VHT only when cfg80211_ap_settings has VHT IE.
* Otherwise disable VHT.
*/
void mwifiex_set_vht_width(struct mwifiex_private *priv,
enum nl80211_chan_width width,
bool ap_11ac_enable)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_11ac_vht_cfg vht_cfg;
vht_cfg.band_config = VHT_CFG_5GHZ;
vht_cfg.cap_info = adapter->hw_dot_11ac_dev_cap;
if (!ap_11ac_enable) {
vht_cfg.mcs_tx_set = DISABLE_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DISABLE_VHT_MCS_SET;
} else {
vht_cfg.mcs_tx_set = DEFAULT_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DEFAULT_VHT_MCS_SET;
}
vht_cfg.misc_config = VHT_CAP_UAP_ONLY;
if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
vht_cfg.misc_config |= VHT_BW_80_160_80P80;
mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
return;
}
/* This function finds supported rates IE from beacon parameter and sets
* these rates into bss_config structure.
*/
void
mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
struct ieee_types_header *rate_ie;
int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u8 *var_pos = params->beacon.head + var_offset;
int len = params->beacon.head_len - var_offset;
u8 rate_len = 0;
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
if (rate_ie)
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
return;
}
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
* be get updated to valid ones at later stage.
*/
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
{
config->bcast_ssid_ctl = 0x7F;
config->radio_ctl = 0x7F;
config->dtim_period = 0x7F;
config->beacon_period = 0x7FFF;
config->auth_mode = 0x7F;
config->rts_threshold = 0x7FFF;
config->frag_threshold = 0x7FFF;
config->retry_limit = 0x7F;
config->qos_info = 0xFF;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WPA/WPA2 security.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_pwk_cipher *pwk_cipher;
struct host_cmd_tlv_gwk_cipher *gwk_cipher;
struct host_cmd_tlv_passphrase *passphrase;
struct host_cmd_tlv_akmp *tlv_akmp;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
u16 cmd_size = *param_size;
u8 *tlv = *tlv_buf;
tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
sizeof(struct mwifiex_ie_types_header));
tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
cmd_size += sizeof(struct host_cmd_tlv_akmp);
tlv += sizeof(struct host_cmd_tlv_akmp);
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
gwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
}
if (bss_cfg->wpa_cfg.length) {
passphrase = (struct host_cmd_tlv_passphrase *)tlv;
passphrase->header.type =
cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
bss_cfg->wpa_cfg.length);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function parses WMM related parameters from cfg80211_ap_settings
* structure and updates bss_config structure.
*/
void
mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vendor_ie;
const u8 *wmm_ie;
u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
params->beacon.tail,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
} else {
memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui));
bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE;
bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION;
priv->wmm_enabled = 0;
}
bss_cfg->qos_info = 0x00;
return;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WEP encryption.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_wep_key *wep_key;
u16 cmd_size = *param_size;
int i;
u8 *tlv = *tlv_buf;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
for (i = 0; i < NUM_WEP_KEYS; i++) {
if (bss_cfg->wep_cfg[i].length &&
(bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
wep_key = (struct host_cmd_tlv_wep_key *)tlv;
wep_key->header.type =
cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
wep_key->header.len =
cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
bss_cfg->wep_cfg[i].length);
cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
}
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function enable 11D if userspace set the country IE.
*/
void mwifiex_config_uap_11d(struct mwifiex_private *priv,
struct cfg80211_beacon_data *beacon_data)
{
enum state_11d_t state_11d;
const u8 *country_ie;
country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_data->tail,
beacon_data->tail_len);
if (country_ie) {
/* Send cmd to FW to enable 11D function */
state_11d = ENABLE_11D;
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
HostCmd_ACT_GEN_SET, DOT11D_I,
&state_11d, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"11D: failed to enable 11D\n");
}
}
}
/* This function parses BSS related parameters from structure
* and prepares TLVs. These TLVs are appended to command buffer.
*/
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
struct host_cmd_tlv_bcast_ssid *bcast_ssid;
struct host_cmd_tlv_channel_band *chan_band;
struct host_cmd_tlv_frag_threshold *frag_threshold;
struct host_cmd_tlv_rts_threshold *rts_threshold;
struct host_cmd_tlv_retry_limit *retry_limit;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
struct host_cmd_tlv_rates *tlv_rates;
struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
struct host_cmd_tlv_power_constraint *pwr_ct;
struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_ie_types_wmmcap *wmm_cap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
int i;
u16 cmd_size = *param_size;
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
bcast_ssid->header.len =
cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
}
if (bss_cfg->rates[0]) {
tlv_rates = (struct host_cmd_tlv_rates *)tlv;
tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
i++)
tlv_rates->rates[i] = bss_cfg->rates[i];
tlv_rates->header.len = cpu_to_le16(i);
cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
if (bss_cfg->channel &&
(((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG &&
bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
chan_band->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
sizeof(struct mwifiex_ie_types_header));
chan_band->band_config = bss_cfg->band_cfg;
chan_band->channel = bss_cfg->channel;
cmd_size += sizeof(struct host_cmd_tlv_channel_band);
tlv += sizeof(struct host_cmd_tlv_channel_band);
}
if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
beacon_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
beacon_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
sizeof(struct mwifiex_ie_types_header));
beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
tlv += sizeof(struct host_cmd_tlv_beacon_period);
}
if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
dtim_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
dtim_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
sizeof(struct mwifiex_ie_types_header));
dtim_period->period = bss_cfg->dtim_period;
cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
tlv += sizeof(struct host_cmd_tlv_dtim_period);
}
if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
rts_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
rts_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
sizeof(struct mwifiex_ie_types_header));
rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
(bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
frag_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
frag_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
sizeof(struct mwifiex_ie_types_header));
frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
retry_limit->header.type =
cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
retry_limit->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
sizeof(struct mwifiex_ie_types_header));
retry_limit->limit = (u8)bss_cfg->retry_limit;
cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
tlv += sizeof(struct host_cmd_tlv_retry_limit);
}
if ((bss_cfg->protocol & PROTOCOL_WPA) ||
(bss_cfg->protocol & PROTOCOL_WPA2) ||
(bss_cfg->protocol & PROTOCOL_EAP))
mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
else
mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
auth_type->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
sizeof(struct mwifiex_ie_types_header));
auth_type->auth_type = (u8)bss_cfg->auth_mode;
cmd_size += sizeof(struct host_cmd_tlv_auth_type);
tlv += sizeof(struct host_cmd_tlv_auth_type);
}
if (bss_cfg->protocol) {
encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
encrypt_protocol->header.type =
cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
encrypt_protocol->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
- sizeof(struct mwifiex_ie_types_header));
encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
}
if (bss_cfg->ht_cap.cap_info) {
htcap = (struct mwifiex_ie_types_htcap *)tlv;
htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
htcap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
htcap->ht_cap.ampdu_params_info =
bss_cfg->ht_cap.ampdu_params_info;
memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
sizeof(struct ieee80211_mcs_info));
htcap->ht_cap.extended_ht_cap_info =
bss_cfg->ht_cap.extended_ht_cap_info;
htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
htcap->ht_cap.antenna_selection_info =
bss_cfg->ht_cap.antenna_selection_info;
cmd_size += sizeof(struct mwifiex_ie_types_htcap);
tlv += sizeof(struct mwifiex_ie_types_htcap);
}
if (bss_cfg->wmm_info.qos_info != 0xFF) {
wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv;
wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC);
wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info));
memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info,
sizeof(wmm_cap->wmm_info));
cmd_size += sizeof(struct mwifiex_ie_types_wmmcap);
tlv += sizeof(struct mwifiex_ie_types_wmmcap);
}
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
cmd_size += sizeof(*ao_timer);
tlv += sizeof(*ao_timer);
}
if (bss_cfg->power_constraint) {
pwr_ct = (void *)tlv;
pwr_ct->header.type = cpu_to_le16(TLV_TYPE_PWR_CONSTRAINT);
pwr_ct->header.len = cpu_to_le16(sizeof(u8));
pwr_ct->constraint = bss_cfg->power_constraint;
cmd_size += sizeof(*pwr_ct);
tlv += sizeof(*pwr_ct);
}
if (bss_cfg->ps_sta_ao_timer) {
ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ps_ao_timer->header.type =
cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ps_ao_timer->sta_ao_timer =
cpu_to_le32(bss_cfg->ps_sta_ao_timer);
cmd_size += sizeof(*ps_ao_timer);
tlv += sizeof(*ps_ao_timer);
}
*param_size = cmd_size;
return 0;
}
/* This function parses custom IEs from IE list and prepares command buffer */
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
struct mwifiex_ie_list *ap_ie = cmd_buf;
struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
if (!ap_ie || !ap_ie->len)
return -1;
*ie_size += le16_to_cpu(ap_ie->len) +
sizeof(struct mwifiex_ie_types_header);
tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
tlv_ie->len = ap_ie->len;
tlv += sizeof(struct mwifiex_ie_types_header);
memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
return 0;
}
/* Parse AP config structure and prepare TLV based command structure
* to be sent to FW for uAP configuration
*/
static int
mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
u32 type, void *cmd_buf)
{
u8 *tlv;
u16 cmd_size, param_size, ie_size;
struct host_cmd_ds_sys_config *sys_cfg;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
sys_cfg->action = cpu_to_le16(cmd_action);
tlv = sys_cfg->tlv;
switch (type) {
case UAP_BSS_PARAMS_I:
param_size = cmd_size;
if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, ¶m_size))
return -1;
cmd->size = cpu_to_le16(param_size);
break;
case UAP_CUSTOM_IE_I:
ie_size = cmd_size;
if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
return -1;
cmd->size = cpu_to_le16(ie_size);
break;
default:
return -1;
}
return 0;
}
/* This function prepares AP specific deauth command with mac supplied in
* function parameter.
*/
static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u8 *mac)
{
struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
memcpy(sta_deauth->mac, mac, ETH_ALEN);
sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
S_DS_GEN);
return 0;
}
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
* routines based upon the command number.
*/
int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
u16 cmd_action, u32 type,
void *data_buf, void *cmd_buf)
{
struct host_cmd_ds_command *cmd = cmd_buf;
switch (cmd_no) {
case HostCmd_CMD_UAP_SYS_CONFIG:
if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
cmd->command = cpu_to_le16(cmd_no);
cmd->size = cpu_to_le16(S_DS_GEN);
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
return -1;
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
data_buf))
return -1;
break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
return 0;
}
void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef)
{
u8 config_bands = 0, old_bands = priv->adapter->config_bands;
priv->bss_chandef = chandef;
bss_cfg->channel = ieee80211_frequency_to_channel(
chandef.chan->center_freq);
/* Set appropriate bands */
if (chandef.chan->band == NL80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
config_bands = BAND_B | BAND_G;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_AN;
if (chandef.width > NL80211_CHAN_WIDTH_40)
config_bands |= BAND_AAC;
}
switch (chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
break;
case NL80211_CHAN_WIDTH_40:
if (chandef.center_freq1 < chandef.chan->center_freq)
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW;
else
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE;
break;
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
bss_cfg->band_cfg |=
mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4;
break;
default:
mwifiex_dbg(priv->adapter,
WARN, "Unknown channel width: %d\n",
chandef.width);
break;
}
priv->adapter->config_bands = config_bands;
if (old_bands != config_bands) {
mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
mwifiex_dnld_txpwr_table(priv);
}
}
int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg)
{
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to set AP configuration\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to start the BSS\n");
return -1;
}
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, true))
return -1;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_998_1 |
crossvul-cpp_data_bad_3861_0 | /*
* Copyright (c) 2018 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <logging/log.h>
LOG_MODULE_REGISTER(net_mqtt_rx, CONFIG_MQTT_LOG_LEVEL);
#include "mqtt_internal.h"
#include "mqtt_transport.h"
#include "mqtt_os.h"
/** @file mqtt_rx.c
*
* @brief MQTT Received data handling.
*/
static int mqtt_handle_packet(struct mqtt_client *client,
u8_t type_and_flags,
u32_t var_length,
struct buf_ctx *buf)
{
int err_code = 0;
bool notify_event = true;
struct mqtt_evt evt;
/* Success by default, overwritten in special cases. */
evt.result = 0;
switch (type_and_flags & 0xF0) {
case MQTT_PKT_TYPE_CONNACK:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_CONNACK!", client);
evt.type = MQTT_EVT_CONNACK;
err_code = connect_ack_decode(client, buf, &evt.param.connack);
if (err_code == 0) {
MQTT_TRC("[CID %p]: return_code: %d", client,
evt.param.connack.return_code);
if (evt.param.connack.return_code ==
MQTT_CONNECTION_ACCEPTED) {
/* Set state. */
MQTT_SET_STATE(client, MQTT_STATE_CONNECTED);
}
evt.result = evt.param.connack.return_code;
} else {
evt.result = err_code;
}
break;
case MQTT_PKT_TYPE_PUBLISH:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_PUBLISH", client);
evt.type = MQTT_EVT_PUBLISH;
err_code = publish_decode(type_and_flags, var_length, buf,
&evt.param.publish);
evt.result = err_code;
client->internal.remaining_payload =
evt.param.publish.message.payload.len;
MQTT_TRC("PUB QoS:%02x, message len %08x, topic len %08x",
evt.param.publish.message.topic.qos,
evt.param.publish.message.payload.len,
evt.param.publish.message.topic.topic.size);
break;
case MQTT_PKT_TYPE_PUBACK:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_PUBACK!", client);
evt.type = MQTT_EVT_PUBACK;
err_code = publish_ack_decode(buf, &evt.param.puback);
evt.result = err_code;
break;
case MQTT_PKT_TYPE_PUBREC:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_PUBREC!", client);
evt.type = MQTT_EVT_PUBREC;
err_code = publish_receive_decode(buf, &evt.param.pubrec);
evt.result = err_code;
break;
case MQTT_PKT_TYPE_PUBREL:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_PUBREL!", client);
evt.type = MQTT_EVT_PUBREL;
err_code = publish_release_decode(buf, &evt.param.pubrel);
evt.result = err_code;
break;
case MQTT_PKT_TYPE_PUBCOMP:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_PUBCOMP!", client);
evt.type = MQTT_EVT_PUBCOMP;
err_code = publish_complete_decode(buf, &evt.param.pubcomp);
evt.result = err_code;
break;
case MQTT_PKT_TYPE_SUBACK:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_SUBACK!", client);
evt.type = MQTT_EVT_SUBACK;
err_code = subscribe_ack_decode(buf, &evt.param.suback);
evt.result = err_code;
break;
case MQTT_PKT_TYPE_UNSUBACK:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_UNSUBACK!", client);
evt.type = MQTT_EVT_UNSUBACK;
err_code = unsubscribe_ack_decode(buf, &evt.param.unsuback);
evt.result = err_code;
break;
case MQTT_PKT_TYPE_PINGRSP:
MQTT_TRC("[CID %p]: Received MQTT_PKT_TYPE_PINGRSP!", client);
if (client->unacked_ping <= 0) {
MQTT_TRC("Unexpected PINGRSP");
client->unacked_ping = 0;
} else {
client->unacked_ping--;
}
evt.type = MQTT_EVT_PINGRESP;
break;
default:
/* Nothing to notify. */
notify_event = false;
break;
}
if (notify_event == true) {
event_notify(client, &evt);
}
return err_code;
}
static int mqtt_read_message_chunk(struct mqtt_client *client,
struct buf_ctx *buf, u32_t length)
{
int remaining;
int len;
/* Calculate how much data we need to read from the transport,
* given the already buffered data.
*/
remaining = length - (buf->end - buf->cur);
if (remaining <= 0) {
return 0;
}
/* Check if read does not exceed the buffer. */
if (buf->end + remaining > client->rx_buf + client->rx_buf_size) {
MQTT_ERR("[CID %p]: Buffer too small to receive the message",
client);
return -ENOMEM;
}
len = mqtt_transport_read(client, buf->end, remaining, false);
if (len < 0) {
MQTT_TRC("[CID %p]: Transport read error: %d", client, len);
return len;
}
if (len == 0) {
MQTT_TRC("[CID %p]: Connection closed.", client);
return -ENOTCONN;
}
client->internal.rx_buf_datalen += len;
buf->end += len;
if (len < remaining) {
MQTT_TRC("[CID %p]: Message partially received.", client);
return -EAGAIN;
}
return 0;
}
static int mqtt_read_publish_var_header(struct mqtt_client *client,
u8_t type_and_flags,
struct buf_ctx *buf)
{
u8_t qos = (type_and_flags & MQTT_HEADER_QOS_MASK) >> 1;
int err_code;
u32_t variable_header_length;
/* Read topic length field. */
err_code = mqtt_read_message_chunk(client, buf, sizeof(u16_t));
if (err_code < 0) {
return err_code;
}
variable_header_length = *buf->cur << 8; /* MSB */
variable_header_length |= *(buf->cur + 1); /* LSB */
/* Add two bytes for topic length field. */
variable_header_length += sizeof(u16_t);
/* Add two bytes for message_id, if needed. */
if (qos > MQTT_QOS_0_AT_MOST_ONCE) {
variable_header_length += sizeof(u16_t);
}
/* Now we can read the whole header. */
err_code = mqtt_read_message_chunk(client, buf,
variable_header_length);
if (err_code < 0) {
return err_code;
}
return 0;
}
static int mqtt_read_and_parse_fixed_header(struct mqtt_client *client,
u8_t *type_and_flags,
u32_t *var_length,
struct buf_ctx *buf)
{
/* Read the mandatory part of the fixed header in first iteration. */
u8_t chunk_size = MQTT_FIXED_HEADER_MIN_SIZE;
int err_code;
do {
err_code = mqtt_read_message_chunk(client, buf, chunk_size);
if (err_code < 0) {
return err_code;
}
/* Reset to pointer to the beginning of the frame. */
buf->cur = client->rx_buf;
chunk_size = 1U;
err_code = fixed_header_decode(buf, type_and_flags, var_length);
} while (err_code == -EAGAIN);
return err_code;
}
int mqtt_handle_rx(struct mqtt_client *client)
{
int err_code;
u8_t type_and_flags;
u32_t var_length;
struct buf_ctx buf;
buf.cur = client->rx_buf;
buf.end = client->rx_buf + client->internal.rx_buf_datalen;
err_code = mqtt_read_and_parse_fixed_header(client, &type_and_flags,
&var_length, &buf);
if (err_code < 0) {
return (err_code == -EAGAIN) ? 0 : err_code;
}
if ((type_and_flags & 0xF0) == MQTT_PKT_TYPE_PUBLISH) {
err_code = mqtt_read_publish_var_header(client, type_and_flags,
&buf);
} else {
err_code = mqtt_read_message_chunk(client, &buf, var_length);
}
if (err_code < 0) {
return (err_code == -EAGAIN) ? 0 : err_code;
}
/* At this point, packet is ready to be passed to the application. */
err_code = mqtt_handle_packet(client, type_and_flags, var_length, &buf);
if (err_code < 0) {
return err_code;
}
client->internal.rx_buf_datalen = 0U;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3861_0 |
crossvul-cpp_data_bad_4658_0 | /*
* eap.c - Extensible Authentication Protocol for PPP (RFC 2284)
*
* Copyright (c) 2001 by Sun Microsystems, Inc.
* All rights reserved.
*
* Non-exclusive rights to redistribute, modify, translate, and use
* this software in source and binary forms, in whole or in part, is
* hereby granted, provided that the above copyright notice is
* duplicated in any source form, and that neither the name of the
* copyright holder nor the author is used to endorse or promote
* products derived from this software.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Original version by James Carlson
*
* This implementation of EAP supports MD5-Challenge and SRP-SHA1
* authentication styles. Note that support of MD5-Challenge is a
* requirement of RFC 2284, and that it's essentially just a
* reimplementation of regular RFC 1994 CHAP using EAP messages.
*
* As an authenticator ("server"), there are multiple phases for each
* style. In the first phase of each style, the unauthenticated peer
* name is queried using the EAP Identity request type. If the
* "remotename" option is used, then this phase is skipped, because
* the peer's name is presumed to be known.
*
* For MD5-Challenge, there are two phases, and the second phase
* consists of sending the challenge itself and handling the
* associated response.
*
* For SRP-SHA1, there are four phases. The second sends 's', 'N',
* and 'g'. The reply contains 'A'. The third sends 'B', and the
* reply contains 'M1'. The forth sends the 'M2' value.
*
* As an authenticatee ("client"), there's just a single phase --
* responding to the queries generated by the peer. EAP is an
* authenticator-driven protocol.
*
* Based on draft-ietf-pppext-eap-srp-03.txt.
*/
#define RCSID "$Id: eap.c,v 1.4 2004/11/09 22:39:25 paulus Exp $"
/*
* TODO:
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pwd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <assert.h>
#include <errno.h>
#include "pppd.h"
#include "pathnames.h"
#include "md5.h"
#include "eap.h"
#ifdef USE_SRP
#include <t_pwd.h>
#include <t_server.h>
#include <t_client.h>
#include "pppcrypt.h"
#endif /* USE_SRP */
#ifndef SHA_DIGESTSIZE
#define SHA_DIGESTSIZE 20
#endif
eap_state eap_states[NUM_PPP]; /* EAP state; one for each unit */
#ifdef USE_SRP
static char *pn_secret = NULL; /* Pseudonym generating secret */
#endif
/*
* Command-line options.
*/
static option_t eap_option_list[] = {
{ "eap-restart", o_int, &eap_states[0].es_server.ea_timeout,
"Set retransmit timeout for EAP Requests (server)" },
{ "eap-max-sreq", o_int, &eap_states[0].es_server.ea_maxrequests,
"Set max number of EAP Requests sent (server)" },
{ "eap-timeout", o_int, &eap_states[0].es_client.ea_timeout,
"Set time limit for peer EAP authentication" },
{ "eap-max-rreq", o_int, &eap_states[0].es_client.ea_maxrequests,
"Set max number of EAP Requests allows (client)" },
{ "eap-interval", o_int, &eap_states[0].es_rechallenge,
"Set interval for EAP rechallenge" },
#ifdef USE_SRP
{ "srp-interval", o_int, &eap_states[0].es_lwrechallenge,
"Set interval for SRP lightweight rechallenge" },
{ "srp-pn-secret", o_string, &pn_secret,
"Long term pseudonym generation secret" },
{ "srp-use-pseudonym", o_bool, &eap_states[0].es_usepseudo,
"Use pseudonym if offered one by server", 1 },
#endif
{ NULL }
};
/*
* Protocol entry points.
*/
static void eap_init __P((int unit));
static void eap_input __P((int unit, u_char *inp, int inlen));
static void eap_protrej __P((int unit));
static void eap_lowerup __P((int unit));
static void eap_lowerdown __P((int unit));
static int eap_printpkt __P((u_char *inp, int inlen,
void (*)(void *arg, char *fmt, ...), void *arg));
struct protent eap_protent = {
PPP_EAP, /* protocol number */
eap_init, /* initialization procedure */
eap_input, /* process a received packet */
eap_protrej, /* process a received protocol-reject */
eap_lowerup, /* lower layer has gone up */
eap_lowerdown, /* lower layer has gone down */
NULL, /* open the protocol */
NULL, /* close the protocol */
eap_printpkt, /* print a packet in readable form */
NULL, /* process a received data packet */
1, /* protocol enabled */
"EAP", /* text name of protocol */
NULL, /* text name of corresponding data protocol */
eap_option_list, /* list of command-line options */
NULL, /* check requested options; assign defaults */
NULL, /* configure interface for demand-dial */
NULL /* say whether to bring up link for this pkt */
};
/*
* A well-known 2048 bit modulus.
*/
static const u_char wkmodulus[] = {
0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B,
0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F,
0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07,
0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50,
0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED,
0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D,
0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D,
0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50,
0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0,
0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3,
0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8,
0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8,
0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA,
0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74,
0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7,
0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B,
0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16,
0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81,
0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A,
0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48,
0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D,
0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA,
0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78,
0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6,
0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29,
0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8,
0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82,
0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6,
0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4,
0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75,
0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2,
0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73
};
/* Local forward declarations. */
static void eap_server_timeout __P((void *arg));
/*
* Convert EAP state code to printable string for debug.
*/
static const char *
eap_state_name(esc)
enum eap_state_code esc;
{
static const char *state_names[] = { EAP_STATES };
return (state_names[(int)esc]);
}
/*
* eap_init - Initialize state for an EAP user. This is currently
* called once by main() during start-up.
*/
static void
eap_init(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
BZERO(esp, sizeof (*esp));
esp->es_unit = unit;
esp->es_server.ea_timeout = EAP_DEFTIMEOUT;
esp->es_server.ea_maxrequests = EAP_DEFTRANSMITS;
esp->es_server.ea_id = (u_char)(drand48() * 0x100);
esp->es_client.ea_timeout = EAP_DEFREQTIME;
esp->es_client.ea_maxrequests = EAP_DEFALLOWREQ;
}
/*
* eap_client_timeout - Give up waiting for the peer to send any
* Request messages.
*/
static void
eap_client_timeout(arg)
void *arg;
{
eap_state *esp = (eap_state *) arg;
if (!eap_client_active(esp))
return;
error("EAP: timeout waiting for Request from peer");
auth_withpeer_fail(esp->es_unit, PPP_EAP);
esp->es_client.ea_state = eapBadAuth;
}
/*
* eap_authwithpeer - Authenticate to our peer (behave as client).
*
* Start client state and wait for requests. This is called only
* after eap_lowerup.
*/
void
eap_authwithpeer(unit, localname)
int unit;
char *localname;
{
eap_state *esp = &eap_states[unit];
/* Save the peer name we're given */
esp->es_client.ea_name = localname;
esp->es_client.ea_namelen = strlen(localname);
esp->es_client.ea_state = eapListen;
/*
* Start a timer so that if the other end just goes
* silent, we don't sit here waiting forever.
*/
if (esp->es_client.ea_timeout > 0)
TIMEOUT(eap_client_timeout, (void *)esp,
esp->es_client.ea_timeout);
}
/*
* Format a standard EAP Failure message and send it to the peer.
* (Server operation)
*/
static void
eap_send_failure(esp)
eap_state *esp;
{
u_char *outp;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_FAILURE, outp);
esp->es_server.ea_id++;
PUTCHAR(esp->es_server.ea_id, outp);
PUTSHORT(EAP_HEADERLEN, outp);
output(esp->es_unit, outpacket_buf, EAP_HEADERLEN + PPP_HDRLEN);
esp->es_server.ea_state = eapBadAuth;
auth_peer_fail(esp->es_unit, PPP_EAP);
}
/*
* Format a standard EAP Success message and send it to the peer.
* (Server operation)
*/
static void
eap_send_success(esp)
eap_state *esp;
{
u_char *outp;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_SUCCESS, outp);
esp->es_server.ea_id++;
PUTCHAR(esp->es_server.ea_id, outp);
PUTSHORT(EAP_HEADERLEN, outp);
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + EAP_HEADERLEN);
auth_peer_success(esp->es_unit, PPP_EAP, 0,
esp->es_server.ea_peer, esp->es_server.ea_peerlen);
}
#ifdef USE_SRP
/*
* Set DES key according to pseudonym-generating secret and current
* date.
*/
static bool
pncrypt_setkey(int timeoffs)
{
struct tm *tp;
char tbuf[9];
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
time_t reftime;
if (pn_secret == NULL)
return (0);
reftime = time(NULL) + timeoffs;
tp = localtime(&reftime);
SHA1Init(&ctxt);
SHA1Update(&ctxt, pn_secret, strlen(pn_secret));
strftime(tbuf, sizeof (tbuf), "%Y%m%d", tp);
SHA1Update(&ctxt, tbuf, strlen(tbuf));
SHA1Final(dig, &ctxt);
return (DesSetkey(dig));
}
static char base64[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
struct b64state {
u_int32_t bs_bits;
int bs_offs;
};
static int
b64enc(bs, inp, inlen, outp)
struct b64state *bs;
u_char *inp;
int inlen;
u_char *outp;
{
int outlen = 0;
while (inlen > 0) {
bs->bs_bits = (bs->bs_bits << 8) | *inp++;
inlen--;
bs->bs_offs += 8;
if (bs->bs_offs >= 24) {
*outp++ = base64[(bs->bs_bits >> 18) & 0x3F];
*outp++ = base64[(bs->bs_bits >> 12) & 0x3F];
*outp++ = base64[(bs->bs_bits >> 6) & 0x3F];
*outp++ = base64[bs->bs_bits & 0x3F];
outlen += 4;
bs->bs_offs = 0;
bs->bs_bits = 0;
}
}
return (outlen);
}
static int
b64flush(bs, outp)
struct b64state *bs;
u_char *outp;
{
int outlen = 0;
if (bs->bs_offs == 8) {
*outp++ = base64[(bs->bs_bits >> 2) & 0x3F];
*outp++ = base64[(bs->bs_bits << 4) & 0x3F];
outlen = 2;
} else if (bs->bs_offs == 16) {
*outp++ = base64[(bs->bs_bits >> 10) & 0x3F];
*outp++ = base64[(bs->bs_bits >> 4) & 0x3F];
*outp++ = base64[(bs->bs_bits << 2) & 0x3F];
outlen = 3;
}
bs->bs_offs = 0;
bs->bs_bits = 0;
return (outlen);
}
static int
b64dec(bs, inp, inlen, outp)
struct b64state *bs;
u_char *inp;
int inlen;
u_char *outp;
{
int outlen = 0;
char *cp;
while (inlen > 0) {
if ((cp = strchr(base64, *inp++)) == NULL)
break;
bs->bs_bits = (bs->bs_bits << 6) | (cp - base64);
inlen--;
bs->bs_offs += 6;
if (bs->bs_offs >= 8) {
*outp++ = bs->bs_bits >> (bs->bs_offs - 8);
outlen++;
bs->bs_offs -= 8;
}
}
return (outlen);
}
#endif /* USE_SRP */
/*
* Assume that current waiting server state is complete and figure
* next state to use based on available authentication data. 'status'
* indicates if there was an error in handling the last query. It is
* 0 for success and non-zero for failure.
*/
static void
eap_figure_next_state(esp, status)
eap_state *esp;
int status;
{
#ifdef USE_SRP
unsigned char secbuf[MAXWORDLEN], clear[8], *sp, *dp;
struct t_pw tpw;
struct t_confent *tce, mytce;
char *cp, *cp2;
struct t_server *ts;
int id, i, plen, toffs;
u_char vals[2];
struct b64state bs;
#endif /* USE_SRP */
esp->es_server.ea_timeout = esp->es_savedtime;
switch (esp->es_server.ea_state) {
case eapBadAuth:
return;
case eapIdentify:
#ifdef USE_SRP
/* Discard any previous session. */
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status != 0) {
esp->es_server.ea_state = eapBadAuth;
break;
}
#ifdef USE_SRP
/* If we've got a pseudonym, try to decode to real name. */
if (esp->es_server.ea_peerlen > SRP_PSEUDO_LEN &&
strncmp(esp->es_server.ea_peer, SRP_PSEUDO_ID,
SRP_PSEUDO_LEN) == 0 &&
(esp->es_server.ea_peerlen - SRP_PSEUDO_LEN) * 3 / 4 <
sizeof (secbuf)) {
BZERO(&bs, sizeof (bs));
plen = b64dec(&bs,
esp->es_server.ea_peer + SRP_PSEUDO_LEN,
esp->es_server.ea_peerlen - SRP_PSEUDO_LEN,
secbuf);
toffs = 0;
for (i = 0; i < 5; i++) {
pncrypt_setkey(toffs);
toffs -= 86400;
if (!DesDecrypt(secbuf, clear)) {
dbglog("no DES here; cannot decode "
"pseudonym");
return;
}
id = *(unsigned char *)clear;
if (id + 1 <= plen && id + 9 > plen)
break;
}
if (plen % 8 == 0 && i < 5) {
/*
* Note that this is always shorter than the
* original stored string, so there's no need
* to realloc.
*/
if ((i = plen = *(unsigned char *)clear) > 7)
i = 7;
esp->es_server.ea_peerlen = plen;
dp = (unsigned char *)esp->es_server.ea_peer;
BCOPY(clear + 1, dp, i);
plen -= i;
dp += i;
sp = secbuf + 8;
while (plen > 0) {
(void) DesDecrypt(sp, dp);
sp += 8;
dp += 8;
plen -= 8;
}
esp->es_server.ea_peer[
esp->es_server.ea_peerlen] = '\0';
dbglog("decoded pseudonym to \"%.*q\"",
esp->es_server.ea_peerlen,
esp->es_server.ea_peer);
} else {
dbglog("failed to decode real name");
/* Stay in eapIdentfy state; requery */
break;
}
}
/* Look up user in secrets database. */
if (get_srp_secret(esp->es_unit, esp->es_server.ea_peer,
esp->es_server.ea_name, (char *)secbuf, 1) != 0) {
/* Set up default in case SRP entry is bad */
esp->es_server.ea_state = eapMD5Chall;
/* Get t_confent based on index in srp-secrets */
id = strtol((char *)secbuf, &cp, 10);
if (*cp++ != ':' || id < 0)
break;
if (id == 0) {
mytce.index = 0;
mytce.modulus.data = (u_char *)wkmodulus;
mytce.modulus.len = sizeof (wkmodulus);
mytce.generator.data = (u_char *)"\002";
mytce.generator.len = 1;
tce = &mytce;
} else if ((tce = gettcid(id)) != NULL) {
/*
* Client will have to verify this modulus/
* generator combination, and that will take
* a while. Lengthen the timeout here.
*/
if (esp->es_server.ea_timeout > 0 &&
esp->es_server.ea_timeout < 30)
esp->es_server.ea_timeout = 30;
} else {
break;
}
if ((cp2 = strchr(cp, ':')) == NULL)
break;
*cp2++ = '\0';
tpw.pebuf.name = esp->es_server.ea_peer;
tpw.pebuf.password.len = t_fromb64((char *)tpw.pwbuf,
cp);
tpw.pebuf.password.data = tpw.pwbuf;
tpw.pebuf.salt.len = t_fromb64((char *)tpw.saltbuf,
cp2);
tpw.pebuf.salt.data = tpw.saltbuf;
if ((ts = t_serveropenraw(&tpw.pebuf, tce)) == NULL)
break;
esp->es_server.ea_session = (void *)ts;
esp->es_server.ea_state = eapSRP1;
vals[0] = esp->es_server.ea_id + 1;
vals[1] = EAPT_SRP;
t_serveraddexdata(ts, vals, 2);
/* Generate B; must call before t_servergetkey() */
t_servergenexp(ts);
break;
}
#endif /* USE_SRP */
esp->es_server.ea_state = eapMD5Chall;
break;
case eapSRP1:
#ifdef USE_SRP
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL && status != 0) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status == 1) {
esp->es_server.ea_state = eapMD5Chall;
} else if (status != 0 || esp->es_server.ea_session == NULL) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapSRP2;
}
break;
case eapSRP2:
#ifdef USE_SRP
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL && status != 0) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status != 0 || esp->es_server.ea_session == NULL) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapSRP3;
}
break;
case eapSRP3:
case eapSRP4:
#ifdef USE_SRP
ts = (struct t_server *)esp->es_server.ea_session;
if (ts != NULL && status != 0) {
t_serverclose(ts);
esp->es_server.ea_session = NULL;
esp->es_server.ea_skey = NULL;
}
#endif /* USE_SRP */
if (status != 0 || esp->es_server.ea_session == NULL) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapOpen;
}
break;
case eapMD5Chall:
if (status != 0) {
esp->es_server.ea_state = eapBadAuth;
} else {
esp->es_server.ea_state = eapOpen;
}
break;
default:
esp->es_server.ea_state = eapBadAuth;
break;
}
if (esp->es_server.ea_state == eapBadAuth)
eap_send_failure(esp);
}
/*
* Format an EAP Request message and send it to the peer. Message
* type depends on current state. (Server operation)
*/
static void
eap_send_request(esp)
eap_state *esp;
{
u_char *outp;
u_char *lenloc;
u_char *ptr;
int outlen;
int challen;
char *str;
#ifdef USE_SRP
struct t_server *ts;
u_char clear[8], cipher[8], dig[SHA_DIGESTSIZE], *optr, *cp;
int i, j;
struct b64state b64;
SHA1_CTX ctxt;
#endif /* USE_SRP */
/* Handle both initial auth and restart */
if (esp->es_server.ea_state < eapIdentify &&
esp->es_server.ea_state != eapInitial) {
esp->es_server.ea_state = eapIdentify;
if (explicit_remote) {
/*
* If we already know the peer's
* unauthenticated name, then there's no
* reason to ask. Go to next state instead.
*/
esp->es_server.ea_peer = remote_name;
esp->es_server.ea_peerlen = strlen(remote_name);
eap_figure_next_state(esp, 0);
}
}
if (esp->es_server.ea_maxrequests > 0 &&
esp->es_server.ea_requests >= esp->es_server.ea_maxrequests) {
if (esp->es_server.ea_responses > 0)
error("EAP: too many Requests sent");
else
error("EAP: no response to Requests");
eap_send_failure(esp);
return;
}
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_REQUEST, outp);
PUTCHAR(esp->es_server.ea_id, outp);
lenloc = outp;
INCPTR(2, outp);
switch (esp->es_server.ea_state) {
case eapIdentify:
PUTCHAR(EAPT_IDENTITY, outp);
str = "Name";
challen = strlen(str);
BCOPY(str, outp, challen);
INCPTR(challen, outp);
break;
case eapMD5Chall:
PUTCHAR(EAPT_MD5CHAP, outp);
/*
* pick a random challenge length between
* MIN_CHALLENGE_LENGTH and MAX_CHALLENGE_LENGTH
*/
challen = (drand48() *
(MAX_CHALLENGE_LENGTH - MIN_CHALLENGE_LENGTH)) +
MIN_CHALLENGE_LENGTH;
PUTCHAR(challen, outp);
esp->es_challen = challen;
ptr = esp->es_challenge;
while (--challen >= 0)
*ptr++ = (u_char) (drand48() * 0x100);
BCOPY(esp->es_challenge, outp, esp->es_challen);
INCPTR(esp->es_challen, outp);
BCOPY(esp->es_server.ea_name, outp, esp->es_server.ea_namelen);
INCPTR(esp->es_server.ea_namelen, outp);
break;
#ifdef USE_SRP
case eapSRP1:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_CHALLENGE, outp);
PUTCHAR(esp->es_server.ea_namelen, outp);
BCOPY(esp->es_server.ea_name, outp, esp->es_server.ea_namelen);
INCPTR(esp->es_server.ea_namelen, outp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
PUTCHAR(ts->s.len, outp);
BCOPY(ts->s.data, outp, ts->s.len);
INCPTR(ts->s.len, outp);
if (ts->g.len == 1 && ts->g.data[0] == 2) {
PUTCHAR(0, outp);
} else {
PUTCHAR(ts->g.len, outp);
BCOPY(ts->g.data, outp, ts->g.len);
INCPTR(ts->g.len, outp);
}
if (ts->n.len != sizeof (wkmodulus) ||
BCMP(ts->n.data, wkmodulus, sizeof (wkmodulus)) != 0) {
BCOPY(ts->n.data, outp, ts->n.len);
INCPTR(ts->n.len, outp);
}
break;
case eapSRP2:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_SKEY, outp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
BCOPY(ts->B.data, outp, ts->B.len);
INCPTR(ts->B.len, outp);
break;
case eapSRP3:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_SVALIDATOR, outp);
PUTLONG(SRPVAL_EBIT, outp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
BCOPY(t_serverresponse(ts), outp, SHA_DIGESTSIZE);
INCPTR(SHA_DIGESTSIZE, outp);
if (pncrypt_setkey(0)) {
/* Generate pseudonym */
optr = outp;
cp = (unsigned char *)esp->es_server.ea_peer;
if ((j = i = esp->es_server.ea_peerlen) > 7)
j = 7;
clear[0] = i;
BCOPY(cp, clear + 1, j);
i -= j;
cp += j;
if (!DesEncrypt(clear, cipher)) {
dbglog("no DES here; not generating pseudonym");
break;
}
BZERO(&b64, sizeof (b64));
outp++; /* space for pseudonym length */
outp += b64enc(&b64, cipher, 8, outp);
while (i >= 8) {
(void) DesEncrypt(cp, cipher);
outp += b64enc(&b64, cipher, 8, outp);
cp += 8;
i -= 8;
}
if (i > 0) {
BCOPY(cp, clear, i);
cp += i;
while (i < 8) {
*cp++ = drand48() * 0x100;
i++;
}
(void) DesEncrypt(clear, cipher);
outp += b64enc(&b64, cipher, 8, outp);
}
outp += b64flush(&b64, outp);
/* Set length and pad out to next 20 octet boundary */
i = outp - optr - 1;
*optr = i;
i %= SHA_DIGESTSIZE;
if (i != 0) {
while (i < SHA_DIGESTSIZE) {
*outp++ = drand48() * 0x100;
i++;
}
}
/* Obscure the pseudonym with SHA1 hash */
SHA1Init(&ctxt);
SHA1Update(&ctxt, &esp->es_server.ea_id, 1);
SHA1Update(&ctxt, esp->es_server.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, esp->es_server.ea_peer,
esp->es_server.ea_peerlen);
while (optr < outp) {
SHA1Final(dig, &ctxt);
cp = dig;
while (cp < dig + SHA_DIGESTSIZE)
*optr++ ^= *cp++;
SHA1Init(&ctxt);
SHA1Update(&ctxt, &esp->es_server.ea_id, 1);
SHA1Update(&ctxt, esp->es_server.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, optr - SHA_DIGESTSIZE,
SHA_DIGESTSIZE);
}
}
break;
case eapSRP4:
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_LWRECHALLENGE, outp);
challen = MIN_CHALLENGE_LENGTH +
((MAX_CHALLENGE_LENGTH - MIN_CHALLENGE_LENGTH) * drand48());
esp->es_challen = challen;
ptr = esp->es_challenge;
while (--challen >= 0)
*ptr++ = drand48() * 0x100;
BCOPY(esp->es_challenge, outp, esp->es_challen);
INCPTR(esp->es_challen, outp);
break;
#endif /* USE_SRP */
default:
return;
}
outlen = (outp - outpacket_buf) - PPP_HDRLEN;
PUTSHORT(outlen, lenloc);
output(esp->es_unit, outpacket_buf, outlen + PPP_HDRLEN);
esp->es_server.ea_requests++;
if (esp->es_server.ea_timeout > 0)
TIMEOUT(eap_server_timeout, esp, esp->es_server.ea_timeout);
}
/*
* eap_authpeer - Authenticate our peer (behave as server).
*
* Start server state and send first request. This is called only
* after eap_lowerup.
*/
void
eap_authpeer(unit, localname)
int unit;
char *localname;
{
eap_state *esp = &eap_states[unit];
/* Save the name we're given. */
esp->es_server.ea_name = localname;
esp->es_server.ea_namelen = strlen(localname);
esp->es_savedtime = esp->es_server.ea_timeout;
/* Lower layer up yet? */
if (esp->es_server.ea_state == eapInitial ||
esp->es_server.ea_state == eapPending) {
esp->es_server.ea_state = eapPending;
return;
}
esp->es_server.ea_state = eapPending;
/* ID number not updated here intentionally; hashed into M1 */
eap_send_request(esp);
}
/*
* eap_server_timeout - Retransmission timer for sending Requests
* expired.
*/
static void
eap_server_timeout(arg)
void *arg;
{
eap_state *esp = (eap_state *) arg;
if (!eap_server_active(esp))
return;
/* EAP ID number must not change on timeout. */
eap_send_request(esp);
}
/*
* When it's time to send rechallenge the peer, this timeout is
* called. Once the rechallenge is successful, the response handler
* will restart the timer. If it fails, then the link is dropped.
*/
static void
eap_rechallenge(arg)
void *arg;
{
eap_state *esp = (eap_state *)arg;
if (esp->es_server.ea_state != eapOpen &&
esp->es_server.ea_state != eapSRP4)
return;
esp->es_server.ea_requests = 0;
esp->es_server.ea_state = eapIdentify;
eap_figure_next_state(esp, 0);
esp->es_server.ea_id++;
eap_send_request(esp);
}
static void
srp_lwrechallenge(arg)
void *arg;
{
eap_state *esp = (eap_state *)arg;
if (esp->es_server.ea_state != eapOpen ||
esp->es_server.ea_type != EAPT_SRP)
return;
esp->es_server.ea_requests = 0;
esp->es_server.ea_state = eapSRP4;
esp->es_server.ea_id++;
eap_send_request(esp);
}
/*
* eap_lowerup - The lower layer is now up.
*
* This is called before either eap_authpeer or eap_authwithpeer. See
* link_established() in auth.c. All that's necessary here is to
* return to closed state so that those two routines will do the right
* thing.
*/
static void
eap_lowerup(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
/* Discard any (possibly authenticated) peer name. */
if (esp->es_server.ea_peer != NULL &&
esp->es_server.ea_peer != remote_name)
free(esp->es_server.ea_peer);
esp->es_server.ea_peer = NULL;
if (esp->es_client.ea_peer != NULL)
free(esp->es_client.ea_peer);
esp->es_client.ea_peer = NULL;
esp->es_client.ea_state = eapClosed;
esp->es_server.ea_state = eapClosed;
}
/*
* eap_lowerdown - The lower layer is now down.
*
* Cancel all timeouts and return to initial state.
*/
static void
eap_lowerdown(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
if (eap_client_active(esp) && esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
if (eap_server_active(esp)) {
if (esp->es_server.ea_timeout > 0) {
UNTIMEOUT(eap_server_timeout, (void *)esp);
}
} else {
if ((esp->es_server.ea_state == eapOpen ||
esp->es_server.ea_state == eapSRP4) &&
esp->es_rechallenge > 0) {
UNTIMEOUT(eap_rechallenge, (void *)esp);
}
if (esp->es_server.ea_state == eapOpen &&
esp->es_lwrechallenge > 0) {
UNTIMEOUT(srp_lwrechallenge, (void *)esp);
}
}
esp->es_client.ea_state = esp->es_server.ea_state = eapInitial;
esp->es_client.ea_requests = esp->es_server.ea_requests = 0;
}
/*
* eap_protrej - Peer doesn't speak this protocol.
*
* This shouldn't happen. If it does, it represents authentication
* failure.
*/
static void
eap_protrej(unit)
int unit;
{
eap_state *esp = &eap_states[unit];
if (eap_client_active(esp)) {
error("EAP authentication failed due to Protocol-Reject");
auth_withpeer_fail(unit, PPP_EAP);
}
if (eap_server_active(esp)) {
error("EAP authentication of peer failed on Protocol-Reject");
auth_peer_fail(unit, PPP_EAP);
}
eap_lowerdown(unit);
}
/*
* Format and send a regular EAP Response message.
*/
static void
eap_send_response(esp, id, typenum, str, lenstr)
eap_state *esp;
u_char id;
u_char typenum;
u_char *str;
int lenstr;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + sizeof (u_char) + lenstr;
PUTSHORT(msglen, outp);
PUTCHAR(typenum, outp);
if (lenstr > 0) {
BCOPY(str, outp, lenstr);
}
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
/*
* Format and send an MD5-Challenge EAP Response message.
*/
static void
eap_chap_response(esp, id, hash, name, namelen)
eap_state *esp;
u_char id;
u_char *hash;
char *name;
int namelen;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + MD5_SIGNATURE_SIZE +
namelen;
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_MD5CHAP, outp);
PUTCHAR(MD5_SIGNATURE_SIZE, outp);
BCOPY(hash, outp, MD5_SIGNATURE_SIZE);
INCPTR(MD5_SIGNATURE_SIZE, outp);
if (namelen > 0) {
BCOPY(name, outp, namelen);
}
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
#ifdef USE_SRP
/*
* Format and send a SRP EAP Response message.
*/
static void
eap_srp_response(esp, id, subtypenum, str, lenstr)
eap_state *esp;
u_char id;
u_char subtypenum;
u_char *str;
int lenstr;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + lenstr;
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(subtypenum, outp);
if (lenstr > 0) {
BCOPY(str, outp, lenstr);
}
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
/*
* Format and send a SRP EAP Client Validator Response message.
*/
static void
eap_srpval_response(esp, id, flags, str)
eap_state *esp;
u_char id;
u_int32_t flags;
u_char *str;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + sizeof (u_int32_t) +
SHA_DIGESTSIZE;
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_SRP, outp);
PUTCHAR(EAPSRP_CVALIDATOR, outp);
PUTLONG(flags, outp);
BCOPY(str, outp, SHA_DIGESTSIZE);
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
#endif /* USE_SRP */
static void
eap_send_nak(esp, id, type)
eap_state *esp;
u_char id;
u_char type;
{
u_char *outp;
int msglen;
outp = outpacket_buf;
MAKEHEADER(outp, PPP_EAP);
PUTCHAR(EAP_RESPONSE, outp);
PUTCHAR(id, outp);
esp->es_client.ea_id = id;
msglen = EAP_HEADERLEN + 2 * sizeof (u_char);
PUTSHORT(msglen, outp);
PUTCHAR(EAPT_NAK, outp);
PUTCHAR(type, outp);
output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
#ifdef USE_SRP
static char *
name_of_pn_file()
{
char *user, *path, *file;
struct passwd *pw;
size_t pl;
static bool pnlogged = 0;
pw = getpwuid(getuid());
if (pw == NULL || (user = pw->pw_dir) == NULL || user[0] == 0) {
errno = EINVAL;
return (NULL);
}
file = _PATH_PSEUDONYM;
pl = strlen(user) + strlen(file) + 2;
path = malloc(pl);
if (path == NULL)
return (NULL);
(void) slprintf(path, pl, "%s/%s", user, file);
if (!pnlogged) {
dbglog("pseudonym file: %s", path);
pnlogged = 1;
}
return (path);
}
static int
open_pn_file(modebits)
mode_t modebits;
{
char *path;
int fd, err;
if ((path = name_of_pn_file()) == NULL)
return (-1);
fd = open(path, modebits, S_IRUSR | S_IWUSR);
err = errno;
free(path);
errno = err;
return (fd);
}
static void
remove_pn_file()
{
char *path;
if ((path = name_of_pn_file()) != NULL) {
(void) unlink(path);
(void) free(path);
}
}
static void
write_pseudonym(esp, inp, len, id)
eap_state *esp;
u_char *inp;
int len, id;
{
u_char val;
u_char *datp, *digp;
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
int dsize, fd, olen = len;
/*
* Do the decoding by working backwards. This eliminates the need
* to save the decoded output in a separate buffer.
*/
val = id;
while (len > 0) {
if ((dsize = len % SHA_DIGESTSIZE) == 0)
dsize = SHA_DIGESTSIZE;
len -= dsize;
datp = inp + len;
SHA1Init(&ctxt);
SHA1Update(&ctxt, &val, 1);
SHA1Update(&ctxt, esp->es_client.ea_skey, SESSION_KEY_LEN);
if (len > 0) {
SHA1Update(&ctxt, datp, SHA_DIGESTSIZE);
} else {
SHA1Update(&ctxt, esp->es_client.ea_name,
esp->es_client.ea_namelen);
}
SHA1Final(dig, &ctxt);
for (digp = dig; digp < dig + SHA_DIGESTSIZE; digp++)
*datp++ ^= *digp;
}
/* Now check that the result is sane */
if (olen <= 0 || *inp + 1 > olen) {
dbglog("EAP: decoded pseudonym is unusable <%.*B>", olen, inp);
return;
}
/* Save it away */
fd = open_pn_file(O_WRONLY | O_CREAT | O_TRUNC);
if (fd < 0) {
dbglog("EAP: error saving pseudonym: %m");
return;
}
len = write(fd, inp + 1, *inp);
if (close(fd) != -1 && len == *inp) {
dbglog("EAP: saved pseudonym");
esp->es_usedpseudo = 0;
} else {
dbglog("EAP: failed to save pseudonym");
remove_pn_file();
}
}
#endif /* USE_SRP */
/*
* eap_request - Receive EAP Request message (client mode).
*/
static void
eap_request(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
u_char typenum;
u_char vallen;
int secret_len;
char secret[MAXWORDLEN];
char rhostname[256];
MD5_CTX mdContext;
u_char hash[MD5_SIGNATURE_SIZE];
#ifdef USE_SRP
struct t_client *tc;
struct t_num sval, gval, Nval, *Ap, Bval;
u_char vals[2];
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
int fd;
#endif /* USE_SRP */
/*
* Note: we update es_client.ea_id *only if* a Response
* message is being generated. Otherwise, we leave it the
* same for duplicate detection purposes.
*/
esp->es_client.ea_requests++;
if (esp->es_client.ea_maxrequests != 0 &&
esp->es_client.ea_requests > esp->es_client.ea_maxrequests) {
info("EAP: received too many Request messages");
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
auth_withpeer_fail(esp->es_unit, PPP_EAP);
return;
}
if (len <= 0) {
error("EAP: empty Request message discarded");
return;
}
GETCHAR(typenum, inp);
len--;
switch (typenum) {
case EAPT_IDENTITY:
if (len > 0)
info("EAP: Identity prompt \"%.*q\"", len, inp);
#ifdef USE_SRP
if (esp->es_usepseudo &&
(esp->es_usedpseudo == 0 ||
(esp->es_usedpseudo == 1 &&
id == esp->es_client.ea_id))) {
esp->es_usedpseudo = 1;
/* Try to get a pseudonym */
if ((fd = open_pn_file(O_RDONLY)) >= 0) {
strcpy(rhostname, SRP_PSEUDO_ID);
len = read(fd, rhostname + SRP_PSEUDO_LEN,
sizeof (rhostname) - SRP_PSEUDO_LEN);
/* XXX NAI unsupported */
if (len > 0) {
eap_send_response(esp, id, typenum,
rhostname, len + SRP_PSEUDO_LEN);
}
(void) close(fd);
if (len > 0)
break;
}
}
/* Stop using pseudonym now. */
if (esp->es_usepseudo && esp->es_usedpseudo != 2) {
remove_pn_file();
esp->es_usedpseudo = 2;
}
#endif /* USE_SRP */
eap_send_response(esp, id, typenum, esp->es_client.ea_name,
esp->es_client.ea_namelen);
break;
case EAPT_NOTIFICATION:
if (len > 0)
info("EAP: Notification \"%.*q\"", len, inp);
eap_send_response(esp, id, typenum, NULL, 0);
break;
case EAPT_NAK:
/*
* Avoid the temptation to send Response Nak in reply
* to Request Nak here. It can only lead to trouble.
*/
warn("EAP: unexpected Nak in Request; ignored");
/* Return because we're waiting for something real. */
return;
case EAPT_MD5CHAP:
if (len < 1) {
error("EAP: received MD5-Challenge with no data");
/* Bogus request; wait for something real. */
return;
}
GETCHAR(vallen, inp);
len--;
if (vallen < 8 || vallen > len) {
error("EAP: MD5-Challenge with bad length %d (8..%d)",
vallen, len);
/* Try something better. */
eap_send_nak(esp, id, EAPT_SRP);
break;
}
/* Not so likely to happen. */
if (vallen >= len + sizeof (rhostname)) {
dbglog("EAP: trimming really long peer name down");
BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
rhostname[sizeof (rhostname) - 1] = '\0';
} else {
BCOPY(inp + vallen, rhostname, len - vallen);
rhostname[len - vallen] = '\0';
}
/* In case the remote doesn't give us his name. */
if (explicit_remote ||
(remote_name[0] != '\0' && vallen == len))
strlcpy(rhostname, remote_name, sizeof (rhostname));
/*
* Get the secret for authenticating ourselves with
* the specified host.
*/
if (!get_secret(esp->es_unit, esp->es_client.ea_name,
rhostname, secret, &secret_len, 0)) {
dbglog("EAP: no MD5 secret for auth to %q", rhostname);
eap_send_nak(esp, id, EAPT_SRP);
break;
}
MD5_Init(&mdContext);
typenum = id;
MD5_Update(&mdContext, &typenum, 1);
MD5_Update(&mdContext, (u_char *)secret, secret_len);
BZERO(secret, sizeof (secret));
MD5_Update(&mdContext, inp, vallen);
MD5_Final(hash, &mdContext);
eap_chap_response(esp, id, hash, esp->es_client.ea_name,
esp->es_client.ea_namelen);
break;
#ifdef USE_SRP
case EAPT_SRP:
if (len < 1) {
error("EAP: received empty SRP Request");
/* Bogus request; wait for something real. */
return;
}
/* Get subtype */
GETCHAR(vallen, inp);
len--;
switch (vallen) {
case EAPSRP_CHALLENGE:
tc = NULL;
if (esp->es_client.ea_session != NULL) {
tc = (struct t_client *)esp->es_client.
ea_session;
/*
* If this is a new challenge, then start
* over with a new client session context.
* Otherwise, just resend last response.
*/
if (id != esp->es_client.ea_id) {
t_clientclose(tc);
esp->es_client.ea_session = NULL;
tc = NULL;
}
}
/* No session key just yet */
esp->es_client.ea_skey = NULL;
if (tc == NULL) {
GETCHAR(vallen, inp);
len--;
if (vallen >= len) {
error("EAP: badly-formed SRP Challenge"
" (name)");
/* Ignore badly-formed messages */
return;
}
BCOPY(inp, rhostname, vallen);
rhostname[vallen] = '\0';
INCPTR(vallen, inp);
len -= vallen;
/*
* In case the remote doesn't give us his name,
* use configured name.
*/
if (explicit_remote ||
(remote_name[0] != '\0' && vallen == 0)) {
strlcpy(rhostname, remote_name,
sizeof (rhostname));
}
if (esp->es_client.ea_peer != NULL)
free(esp->es_client.ea_peer);
esp->es_client.ea_peer = strdup(rhostname);
esp->es_client.ea_peerlen = strlen(rhostname);
GETCHAR(vallen, inp);
len--;
if (vallen >= len) {
error("EAP: badly-formed SRP Challenge"
" (s)");
/* Ignore badly-formed messages */
return;
}
sval.data = inp;
sval.len = vallen;
INCPTR(vallen, inp);
len -= vallen;
GETCHAR(vallen, inp);
len--;
if (vallen > len) {
error("EAP: badly-formed SRP Challenge"
" (g)");
/* Ignore badly-formed messages */
return;
}
/* If no generator present, then use value 2 */
if (vallen == 0) {
gval.data = (u_char *)"\002";
gval.len = 1;
} else {
gval.data = inp;
gval.len = vallen;
}
INCPTR(vallen, inp);
len -= vallen;
/*
* If no modulus present, then use well-known
* value.
*/
if (len == 0) {
Nval.data = (u_char *)wkmodulus;
Nval.len = sizeof (wkmodulus);
} else {
Nval.data = inp;
Nval.len = len;
}
tc = t_clientopen(esp->es_client.ea_name,
&Nval, &gval, &sval);
if (tc == NULL) {
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
esp->es_client.ea_session = (void *)tc;
/* Add Challenge ID & type to verifier */
vals[0] = id;
vals[1] = EAPT_SRP;
t_clientaddexdata(tc, vals, 2);
}
Ap = t_clientgenexp(tc);
eap_srp_response(esp, id, EAPSRP_CKEY, Ap->data,
Ap->len);
break;
case EAPSRP_SKEY:
tc = (struct t_client *)esp->es_client.ea_session;
if (tc == NULL) {
warn("EAP: peer sent Subtype 2 without 1");
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
if (esp->es_client.ea_skey != NULL) {
/*
* ID number should not change here. Warn
* if it does (but otherwise ignore).
*/
if (id != esp->es_client.ea_id) {
warn("EAP: ID changed from %d to %d "
"in SRP Subtype 2 rexmit",
esp->es_client.ea_id, id);
}
} else {
if (get_srp_secret(esp->es_unit,
esp->es_client.ea_name,
esp->es_client.ea_peer, secret, 0) == 0) {
/*
* Can't work with this peer because
* the secret is missing. Just give
* up.
*/
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
Bval.data = inp;
Bval.len = len;
t_clientpasswd(tc, secret);
BZERO(secret, sizeof (secret));
esp->es_client.ea_skey =
t_clientgetkey(tc, &Bval);
if (esp->es_client.ea_skey == NULL) {
/* Server is rogue; stop now */
error("EAP: SRP server is rogue");
goto client_failure;
}
}
eap_srpval_response(esp, id, SRPVAL_EBIT,
t_clientresponse(tc));
break;
case EAPSRP_SVALIDATOR:
tc = (struct t_client *)esp->es_client.ea_session;
if (tc == NULL || esp->es_client.ea_skey == NULL) {
warn("EAP: peer sent Subtype 3 without 1/2");
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
/*
* If we're already open, then this ought to be a
* duplicate. Otherwise, check that the server is
* who we think it is.
*/
if (esp->es_client.ea_state == eapOpen) {
if (id != esp->es_client.ea_id) {
warn("EAP: ID changed from %d to %d "
"in SRP Subtype 3 rexmit",
esp->es_client.ea_id, id);
}
} else {
len -= sizeof (u_int32_t) + SHA_DIGESTSIZE;
if (len < 0 || t_clientverify(tc, inp +
sizeof (u_int32_t)) != 0) {
error("EAP: SRP server verification "
"failed");
goto client_failure;
}
GETLONG(esp->es_client.ea_keyflags, inp);
/* Save pseudonym if user wants it. */
if (len > 0 && esp->es_usepseudo) {
INCPTR(SHA_DIGESTSIZE, inp);
write_pseudonym(esp, inp, len, id);
}
}
/*
* We've verified our peer. We're now mostly done,
* except for waiting on the regular EAP Success
* message.
*/
eap_srp_response(esp, id, EAPSRP_ACK, NULL, 0);
break;
case EAPSRP_LWRECHALLENGE:
if (len < 4) {
warn("EAP: malformed Lightweight rechallenge");
return;
}
SHA1Init(&ctxt);
vals[0] = id;
SHA1Update(&ctxt, vals, 1);
SHA1Update(&ctxt, esp->es_client.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, inp, len);
SHA1Update(&ctxt, esp->es_client.ea_name,
esp->es_client.ea_namelen);
SHA1Final(dig, &ctxt);
eap_srp_response(esp, id, EAPSRP_LWRECHALLENGE, dig,
SHA_DIGESTSIZE);
break;
default:
error("EAP: unknown SRP Subtype %d", vallen);
eap_send_nak(esp, id, EAPT_MD5CHAP);
break;
}
break;
#endif /* USE_SRP */
default:
info("EAP: unknown authentication type %d; Naking", typenum);
eap_send_nak(esp, id, EAPT_SRP);
break;
}
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
TIMEOUT(eap_client_timeout, (void *)esp,
esp->es_client.ea_timeout);
}
return;
#ifdef USE_SRP
client_failure:
esp->es_client.ea_state = eapBadAuth;
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
esp->es_client.ea_session = NULL;
t_clientclose(tc);
auth_withpeer_fail(esp->es_unit, PPP_EAP);
#endif /* USE_SRP */
}
/*
* eap_response - Receive EAP Response message (server mode).
*/
static void
eap_response(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
u_char typenum;
u_char vallen;
int secret_len;
char secret[MAXSECRETLEN];
char rhostname[256];
MD5_CTX mdContext;
u_char hash[MD5_SIGNATURE_SIZE];
#ifdef USE_SRP
struct t_server *ts;
struct t_num A;
SHA1_CTX ctxt;
u_char dig[SHA_DIGESTSIZE];
#endif /* USE_SRP */
if (esp->es_server.ea_id != id) {
dbglog("EAP: discarding Response %d; expected ID %d", id,
esp->es_server.ea_id);
return;
}
esp->es_server.ea_responses++;
if (len <= 0) {
error("EAP: empty Response message discarded");
return;
}
GETCHAR(typenum, inp);
len--;
switch (typenum) {
case EAPT_IDENTITY:
if (esp->es_server.ea_state != eapIdentify) {
dbglog("EAP discarding unwanted Identify \"%.q\"", len,
inp);
break;
}
info("EAP: unauthenticated peer name \"%.*q\"", len, inp);
if (esp->es_server.ea_peer != NULL &&
esp->es_server.ea_peer != remote_name)
free(esp->es_server.ea_peer);
esp->es_server.ea_peer = malloc(len + 1);
if (esp->es_server.ea_peer == NULL) {
esp->es_server.ea_peerlen = 0;
eap_figure_next_state(esp, 1);
break;
}
BCOPY(inp, esp->es_server.ea_peer, len);
esp->es_server.ea_peer[len] = '\0';
esp->es_server.ea_peerlen = len;
eap_figure_next_state(esp, 0);
break;
case EAPT_NOTIFICATION:
dbglog("EAP unexpected Notification; response discarded");
break;
case EAPT_NAK:
if (len < 1) {
info("EAP: Nak Response with no suggested protocol");
eap_figure_next_state(esp, 1);
break;
}
GETCHAR(vallen, inp);
len--;
if (!explicit_remote && esp->es_server.ea_state == eapIdentify){
/* Peer cannot Nak Identify Request */
eap_figure_next_state(esp, 1);
break;
}
switch (vallen) {
case EAPT_SRP:
/* Run through SRP validator selection again. */
esp->es_server.ea_state = eapIdentify;
eap_figure_next_state(esp, 0);
break;
case EAPT_MD5CHAP:
esp->es_server.ea_state = eapMD5Chall;
break;
default:
dbglog("EAP: peer requesting unknown Type %d", vallen);
switch (esp->es_server.ea_state) {
case eapSRP1:
case eapSRP2:
case eapSRP3:
esp->es_server.ea_state = eapMD5Chall;
break;
case eapMD5Chall:
case eapSRP4:
esp->es_server.ea_state = eapIdentify;
eap_figure_next_state(esp, 0);
break;
default:
break;
}
break;
}
break;
case EAPT_MD5CHAP:
if (esp->es_server.ea_state != eapMD5Chall) {
error("EAP: unexpected MD5-Response");
eap_figure_next_state(esp, 1);
break;
}
if (len < 1) {
error("EAP: received MD5-Response with no data");
eap_figure_next_state(esp, 1);
break;
}
GETCHAR(vallen, inp);
len--;
if (vallen != 16 || vallen > len) {
error("EAP: MD5-Response with bad length %d", vallen);
eap_figure_next_state(esp, 1);
break;
}
/* Not so likely to happen. */
if (vallen >= len + sizeof (rhostname)) {
dbglog("EAP: trimming really long peer name down");
BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
rhostname[sizeof (rhostname) - 1] = '\0';
} else {
BCOPY(inp + vallen, rhostname, len - vallen);
rhostname[len - vallen] = '\0';
}
/* In case the remote doesn't give us his name. */
if (explicit_remote ||
(remote_name[0] != '\0' && vallen == len))
strlcpy(rhostname, remote_name, sizeof (rhostname));
/*
* Get the secret for authenticating the specified
* host.
*/
if (!get_secret(esp->es_unit, rhostname,
esp->es_server.ea_name, secret, &secret_len, 1)) {
dbglog("EAP: no MD5 secret for auth of %q", rhostname);
eap_send_failure(esp);
break;
}
MD5_Init(&mdContext);
MD5_Update(&mdContext, &esp->es_server.ea_id, 1);
MD5_Update(&mdContext, (u_char *)secret, secret_len);
BZERO(secret, sizeof (secret));
MD5_Update(&mdContext, esp->es_challenge, esp->es_challen);
MD5_Final(hash, &mdContext);
if (BCMP(hash, inp, MD5_SIGNATURE_SIZE) != 0) {
eap_send_failure(esp);
break;
}
esp->es_server.ea_type = EAPT_MD5CHAP;
eap_send_success(esp);
eap_figure_next_state(esp, 0);
if (esp->es_rechallenge != 0)
TIMEOUT(eap_rechallenge, esp, esp->es_rechallenge);
break;
#ifdef USE_SRP
case EAPT_SRP:
if (len < 1) {
error("EAP: empty SRP Response");
eap_figure_next_state(esp, 1);
break;
}
GETCHAR(typenum, inp);
len--;
switch (typenum) {
case EAPSRP_CKEY:
if (esp->es_server.ea_state != eapSRP1) {
error("EAP: unexpected SRP Subtype 1 Response");
eap_figure_next_state(esp, 1);
break;
}
A.data = inp;
A.len = len;
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
esp->es_server.ea_skey = t_servergetkey(ts, &A);
if (esp->es_server.ea_skey == NULL) {
/* Client's A value is bogus; terminate now */
error("EAP: bogus A value from client");
eap_send_failure(esp);
} else {
eap_figure_next_state(esp, 0);
}
break;
case EAPSRP_CVALIDATOR:
if (esp->es_server.ea_state != eapSRP2) {
error("EAP: unexpected SRP Subtype 2 Response");
eap_figure_next_state(esp, 1);
break;
}
if (len < sizeof (u_int32_t) + SHA_DIGESTSIZE) {
error("EAP: M1 length %d < %d", len,
sizeof (u_int32_t) + SHA_DIGESTSIZE);
eap_figure_next_state(esp, 1);
break;
}
GETLONG(esp->es_server.ea_keyflags, inp);
ts = (struct t_server *)esp->es_server.ea_session;
assert(ts != NULL);
if (t_serververify(ts, inp)) {
info("EAP: unable to validate client identity");
eap_send_failure(esp);
break;
}
eap_figure_next_state(esp, 0);
break;
case EAPSRP_ACK:
if (esp->es_server.ea_state != eapSRP3) {
error("EAP: unexpected SRP Subtype 3 Response");
eap_send_failure(esp);
break;
}
esp->es_server.ea_type = EAPT_SRP;
eap_send_success(esp);
eap_figure_next_state(esp, 0);
if (esp->es_rechallenge != 0)
TIMEOUT(eap_rechallenge, esp,
esp->es_rechallenge);
if (esp->es_lwrechallenge != 0)
TIMEOUT(srp_lwrechallenge, esp,
esp->es_lwrechallenge);
break;
case EAPSRP_LWRECHALLENGE:
if (esp->es_server.ea_state != eapSRP4) {
info("EAP: unexpected SRP Subtype 4 Response");
return;
}
if (len != SHA_DIGESTSIZE) {
error("EAP: bad Lightweight rechallenge "
"response");
return;
}
SHA1Init(&ctxt);
vallen = id;
SHA1Update(&ctxt, &vallen, 1);
SHA1Update(&ctxt, esp->es_server.ea_skey,
SESSION_KEY_LEN);
SHA1Update(&ctxt, esp->es_challenge, esp->es_challen);
SHA1Update(&ctxt, esp->es_server.ea_peer,
esp->es_server.ea_peerlen);
SHA1Final(dig, &ctxt);
if (BCMP(dig, inp, SHA_DIGESTSIZE) != 0) {
error("EAP: failed Lightweight rechallenge");
eap_send_failure(esp);
break;
}
esp->es_server.ea_state = eapOpen;
if (esp->es_lwrechallenge != 0)
TIMEOUT(srp_lwrechallenge, esp,
esp->es_lwrechallenge);
break;
}
break;
#endif /* USE_SRP */
default:
/* This can't happen. */
error("EAP: unknown Response type %d; ignored", typenum);
return;
}
if (esp->es_server.ea_timeout > 0) {
UNTIMEOUT(eap_server_timeout, (void *)esp);
}
if (esp->es_server.ea_state != eapBadAuth &&
esp->es_server.ea_state != eapOpen) {
esp->es_server.ea_id++;
eap_send_request(esp);
}
}
/*
* eap_success - Receive EAP Success message (client mode).
*/
static void
eap_success(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
if (esp->es_client.ea_state != eapOpen && !eap_client_active(esp)) {
dbglog("EAP unexpected success message in state %s (%d)",
eap_state_name(esp->es_client.ea_state),
esp->es_client.ea_state);
return;
}
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
if (len > 0) {
/* This is odd. The spec doesn't allow for this. */
PRINTMSG(inp, len);
}
esp->es_client.ea_state = eapOpen;
auth_withpeer_success(esp->es_unit, PPP_EAP, 0);
}
/*
* eap_failure - Receive EAP Failure message (client mode).
*/
static void
eap_failure(esp, inp, id, len)
eap_state *esp;
u_char *inp;
int id;
int len;
{
if (!eap_client_active(esp)) {
dbglog("EAP unexpected failure message in state %s (%d)",
eap_state_name(esp->es_client.ea_state),
esp->es_client.ea_state);
}
if (esp->es_client.ea_timeout > 0) {
UNTIMEOUT(eap_client_timeout, (void *)esp);
}
if (len > 0) {
/* This is odd. The spec doesn't allow for this. */
PRINTMSG(inp, len);
}
esp->es_client.ea_state = eapBadAuth;
error("EAP: peer reports authentication failure");
auth_withpeer_fail(esp->es_unit, PPP_EAP);
}
/*
* eap_input - Handle received EAP message.
*/
static void
eap_input(unit, inp, inlen)
int unit;
u_char *inp;
int inlen;
{
eap_state *esp = &eap_states[unit];
u_char code, id;
int len;
/*
* Parse header (code, id and length). If packet too short,
* drop it.
*/
if (inlen < EAP_HEADERLEN) {
error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN);
return;
}
GETCHAR(code, inp);
GETCHAR(id, inp);
GETSHORT(len, inp);
if (len < EAP_HEADERLEN || len > inlen) {
error("EAP: packet has illegal length field %d (%d..%d)", len,
EAP_HEADERLEN, inlen);
return;
}
len -= EAP_HEADERLEN;
/* Dispatch based on message code */
switch (code) {
case EAP_REQUEST:
eap_request(esp, inp, id, len);
break;
case EAP_RESPONSE:
eap_response(esp, inp, id, len);
break;
case EAP_SUCCESS:
eap_success(esp, inp, id, len);
break;
case EAP_FAILURE:
eap_failure(esp, inp, id, len);
break;
default: /* XXX Need code reject */
/* Note: it's not legal to send EAP Nak here. */
warn("EAP: unknown code %d received", code);
break;
}
}
/*
* eap_printpkt - print the contents of an EAP packet.
*/
static char *eap_codenames[] = {
"Request", "Response", "Success", "Failure"
};
static char *eap_typenames[] = {
"Identity", "Notification", "Nak", "MD5-Challenge",
"OTP", "Generic-Token", NULL, NULL,
"RSA", "DSS", "KEA", "KEA-Validate",
"TLS", "Defender", "Windows 2000", "Arcot",
"Cisco", "Nokia", "SRP"
};
static int
eap_printpkt(inp, inlen, printer, arg)
u_char *inp;
int inlen;
void (*printer) __P((void *, char *, ...));
void *arg;
{
int code, id, len, rtype, vallen;
u_char *pstart;
u_int32_t uval;
if (inlen < EAP_HEADERLEN)
return (0);
pstart = inp;
GETCHAR(code, inp);
GETCHAR(id, inp);
GETSHORT(len, inp);
if (len < EAP_HEADERLEN || len > inlen)
return (0);
if (code >= 1 && code <= sizeof(eap_codenames) / sizeof(char *))
printer(arg, " %s", eap_codenames[code-1]);
else
printer(arg, " code=0x%x", code);
printer(arg, " id=0x%x", id);
len -= EAP_HEADERLEN;
switch (code) {
case EAP_REQUEST:
if (len < 1) {
printer(arg, " <missing type>");
break;
}
GETCHAR(rtype, inp);
len--;
if (rtype >= 1 &&
rtype <= sizeof (eap_typenames) / sizeof (char *))
printer(arg, " %s", eap_typenames[rtype-1]);
else
printer(arg, " type=0x%x", rtype);
switch (rtype) {
case EAPT_IDENTITY:
case EAPT_NOTIFICATION:
if (len > 0) {
printer(arg, " <Message ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
} else {
printer(arg, " <No message>");
}
break;
case EAPT_MD5CHAP:
if (len <= 0)
break;
GETCHAR(vallen, inp);
len--;
if (vallen > len)
goto truncated;
printer(arg, " <Value%.*B>", vallen, inp);
INCPTR(vallen, inp);
len -= vallen;
if (len > 0) {
printer(arg, " <Name ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
} else {
printer(arg, " <No name>");
}
break;
case EAPT_SRP:
if (len < 3)
goto truncated;
GETCHAR(vallen, inp);
len--;
printer(arg, "-%d", vallen);
switch (vallen) {
case EAPSRP_CHALLENGE:
GETCHAR(vallen, inp);
len--;
if (vallen >= len)
goto truncated;
if (vallen > 0) {
printer(arg, " <Name ");
print_string((char *)inp, vallen, printer,
arg);
printer(arg, ">");
} else {
printer(arg, " <No name>");
}
INCPTR(vallen, inp);
len -= vallen;
GETCHAR(vallen, inp);
len--;
if (vallen >= len)
goto truncated;
printer(arg, " <s%.*B>", vallen, inp);
INCPTR(vallen, inp);
len -= vallen;
GETCHAR(vallen, inp);
len--;
if (vallen > len)
goto truncated;
if (vallen == 0) {
printer(arg, " <Default g=2>");
} else {
printer(arg, " <g%.*B>", vallen, inp);
}
INCPTR(vallen, inp);
len -= vallen;
if (len == 0) {
printer(arg, " <Default N>");
} else {
printer(arg, " <N%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
}
break;
case EAPSRP_SKEY:
printer(arg, " <B%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
break;
case EAPSRP_SVALIDATOR:
if (len < sizeof (u_int32_t))
break;
GETLONG(uval, inp);
len -= sizeof (u_int32_t);
if (uval & SRPVAL_EBIT) {
printer(arg, " E");
uval &= ~SRPVAL_EBIT;
}
if (uval != 0) {
printer(arg, " f<%X>", uval);
}
if ((vallen = len) > SHA_DIGESTSIZE)
vallen = SHA_DIGESTSIZE;
printer(arg, " <M2%.*B%s>", len, inp,
len < SHA_DIGESTSIZE ? "?" : "");
INCPTR(vallen, inp);
len -= vallen;
if (len > 0) {
printer(arg, " <PN%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
}
break;
case EAPSRP_LWRECHALLENGE:
printer(arg, " <Challenge%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
break;
}
break;
}
break;
case EAP_RESPONSE:
if (len < 1)
break;
GETCHAR(rtype, inp);
len--;
if (rtype >= 1 &&
rtype <= sizeof (eap_typenames) / sizeof (char *))
printer(arg, " %s", eap_typenames[rtype-1]);
else
printer(arg, " type=0x%x", rtype);
switch (rtype) {
case EAPT_IDENTITY:
if (len > 0) {
printer(arg, " <Name ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
}
break;
case EAPT_NAK:
if (len <= 0) {
printer(arg, " <missing hint>");
break;
}
GETCHAR(rtype, inp);
len--;
printer(arg, " <Suggested-type %02X", rtype);
if (rtype >= 1 &&
rtype < sizeof (eap_typenames) / sizeof (char *))
printer(arg, " (%s)", eap_typenames[rtype-1]);
printer(arg, ">");
break;
case EAPT_MD5CHAP:
if (len <= 0) {
printer(arg, " <missing length>");
break;
}
GETCHAR(vallen, inp);
len--;
if (vallen > len)
goto truncated;
printer(arg, " <Value%.*B>", vallen, inp);
INCPTR(vallen, inp);
len -= vallen;
if (len > 0) {
printer(arg, " <Name ");
print_string((char *)inp, len, printer, arg);
printer(arg, ">");
INCPTR(len, inp);
len = 0;
} else {
printer(arg, " <No name>");
}
break;
case EAPT_SRP:
if (len < 1)
goto truncated;
GETCHAR(vallen, inp);
len--;
printer(arg, "-%d", vallen);
switch (vallen) {
case EAPSRP_CKEY:
printer(arg, " <A%.*B>", len, inp);
INCPTR(len, inp);
len = 0;
break;
case EAPSRP_CVALIDATOR:
if (len < sizeof (u_int32_t))
break;
GETLONG(uval, inp);
len -= sizeof (u_int32_t);
if (uval & SRPVAL_EBIT) {
printer(arg, " E");
uval &= ~SRPVAL_EBIT;
}
if (uval != 0) {
printer(arg, " f<%X>", uval);
}
printer(arg, " <M1%.*B%s>", len, inp,
len == SHA_DIGESTSIZE ? "" : "?");
INCPTR(len, inp);
len = 0;
break;
case EAPSRP_ACK:
break;
case EAPSRP_LWRECHALLENGE:
printer(arg, " <Response%.*B%s>", len, inp,
len == SHA_DIGESTSIZE ? "" : "?");
if ((vallen = len) > SHA_DIGESTSIZE)
vallen = SHA_DIGESTSIZE;
INCPTR(vallen, inp);
len -= vallen;
break;
}
break;
}
break;
case EAP_SUCCESS: /* No payload expected for these! */
case EAP_FAILURE:
break;
truncated:
printer(arg, " <truncated>");
break;
}
if (len > 8)
printer(arg, "%8B...", inp);
else if (len > 0)
printer(arg, "%.*B", len, inp);
INCPTR(len, inp);
return (inp - pstart);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4658_0 |
crossvul-cpp_data_bad_1199_0 | /* FriBidi
* fribidi-bidi.c - bidirectional algorithm
*
* Authors:
* Behdad Esfahbod, 2001, 2002, 2004
* Dov Grobgeld, 1999, 2000, 2017
*
* Copyright (C) 2004 Sharif FarsiWeb, Inc
* Copyright (C) 2001,2002 Behdad Esfahbod
* Copyright (C) 1999,2000,2017 Dov Grobgeld
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library, in a file named COPYING; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA
*
* For licensing issues, contact <fribidi.license@gmail.com>.
*/
#include "common.h"
#include <fribidi-bidi.h>
#include <fribidi-mirroring.h>
#include <fribidi-brackets.h>
#include <fribidi-unicode.h>
#include "bidi-types.h"
#include "run.h"
/*
* This file implements most of Unicode Standard Annex #9, Tracking Number 13.
*/
#ifndef MAX
# define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif /* !MAX */
/* Some convenience macros */
#define RL_TYPE(list) ((list)->type)
#define RL_LEN(list) ((list)->len)
#define RL_LEVEL(list) ((list)->level)
/* "Within this scope, bidirectional types EN and AN are treated as R" */
#define RL_TYPE_AN_EN_AS_RTL(list) ( \
(((list)->type == FRIBIDI_TYPE_AN) || ((list)->type == FRIBIDI_TYPE_EN) | ((list)->type == FRIBIDI_TYPE_RTL)) ? FRIBIDI_TYPE_RTL : (list)->type)
#define RL_BRACKET_TYPE(list) ((list)->bracket_type)
#define RL_ISOLATE_LEVEL(list) ((list)->isolate_level)
#define LOCAL_BRACKET_SIZE 16
/* Pairing nodes are used for holding a pair of open/close brackets as
described in BD16. */
struct _FriBidiPairingNodeStruct {
FriBidiRun *open;
FriBidiRun *close;
struct _FriBidiPairingNodeStruct *next;
};
typedef struct _FriBidiPairingNodeStruct FriBidiPairingNode;
static FriBidiRun *
merge_with_prev (
FriBidiRun *second
)
{
FriBidiRun *first;
fribidi_assert (second);
fribidi_assert (second->next);
first = second->prev;
fribidi_assert (first);
first->next = second->next;
first->next->prev = first;
RL_LEN (first) += RL_LEN (second);
if (second->next_isolate)
second->next_isolate->prev_isolate = first;
first->next_isolate = second->next_isolate;
fribidi_free (second);
return first;
}
static void
compact_list (
FriBidiRun *list
)
{
fribidi_assert (list);
if (list->next)
for_run_list (list, list)
if (RL_TYPE (list->prev) == RL_TYPE (list)
&& RL_LEVEL (list->prev) == RL_LEVEL (list)
&& RL_BRACKET_TYPE(list) == FRIBIDI_NO_BRACKET /* Don't join brackets! */
&& RL_BRACKET_TYPE(list->prev) == FRIBIDI_NO_BRACKET
)
list = merge_with_prev (list);
}
static void
compact_neutrals (
FriBidiRun *list
)
{
fribidi_assert (list);
if (list->next)
{
for_run_list (list, list)
{
if (RL_LEVEL (list->prev) == RL_LEVEL (list)
&&
((RL_TYPE (list->prev) == RL_TYPE (list)
|| (FRIBIDI_IS_NEUTRAL (RL_TYPE (list->prev))
&& FRIBIDI_IS_NEUTRAL (RL_TYPE (list)))))
&& RL_BRACKET_TYPE(list) == FRIBIDI_NO_BRACKET /* Don't join brackets! */
&& RL_BRACKET_TYPE(list->prev) == FRIBIDI_NO_BRACKET
)
list = merge_with_prev (list);
}
}
}
/* Search for an adjacent run in the forward or backward direction.
It uses the next_isolate and prev_isolate run for short circuited searching.
*/
/* The static sentinel is used to signal the end of an isolating
sequence */
static FriBidiRun sentinel = { NULL, NULL, 0,0, FRIBIDI_TYPE_SENTINEL, -1,-1,FRIBIDI_NO_BRACKET, NULL, NULL };
static FriBidiRun *get_adjacent_run(FriBidiRun *list, fribidi_boolean forward, fribidi_boolean skip_neutral)
{
FriBidiRun *ppp = forward ? list->next_isolate : list->prev_isolate;
if (!ppp)
return &sentinel;
while (ppp)
{
FriBidiCharType ppp_type = RL_TYPE (ppp);
if (ppp_type == FRIBIDI_TYPE_SENTINEL)
break;
/* Note that when sweeping forward we continue one run
beyond the PDI to see what lies behind. When looking
backwards, this is not necessary as the leading isolate
run has already been assigned the resolved level. */
if (ppp->isolate_level > list->isolate_level /* <- How can this be true? */
|| (forward && ppp_type == FRIBIDI_TYPE_PDI)
|| (skip_neutral && !FRIBIDI_IS_STRONG(ppp_type)))
{
ppp = forward ? ppp->next_isolate : ppp->prev_isolate;
if (!ppp)
ppp = &sentinel;
continue;
}
break;
}
return ppp;
}
#ifdef DEBUG
/*======================================================================
* For debugging, define some functions for printing the types and the
* levels.
*----------------------------------------------------------------------*/
static char char_from_level_array[] = {
'$', /* -1 == FRIBIDI_SENTINEL, indicating
* start or end of string. */
/* 0-61 == 0-9,a-z,A-Z are the the only valid levels before resolving
* implicits. after that the level @ may be appear too. */
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D',
'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z',
/* TBD - insert another 125-64 levels */
'@', /* 62 == only must appear after resolving
* implicits. */
'!', /* 63 == FRIBIDI_LEVEL_INVALID, internal error,
* this level shouldn't be seen. */
'*', '*', '*', '*', '*' /* >= 64 == overflows, this levels and higher
* levels show a real bug!. */
};
#define fribidi_char_from_level(level) char_from_level_array[(level) + 1]
static void
print_types_re (
const FriBidiRun *pp
)
{
fribidi_assert (pp);
MSG (" Run types : ");
for_run_list (pp, pp)
{
MSG6 ("%d:%d(%s)[%d,%d] ",
pp->pos, pp->len, fribidi_get_bidi_type_name (pp->type), pp->level, pp->isolate_level);
}
MSG ("\n");
}
static void
print_resolved_levels (
const FriBidiRun *pp
)
{
fribidi_assert (pp);
MSG (" Res. levels: ");
for_run_list (pp, pp)
{
register FriBidiStrIndex i;
for (i = RL_LEN (pp); i; i--)
MSG2 ("%c", fribidi_char_from_level (RL_LEVEL (pp)));
}
MSG ("\n");
}
static void
print_resolved_types (
const FriBidiRun *pp
)
{
fribidi_assert (pp);
MSG (" Res. types : ");
for_run_list (pp, pp)
{
FriBidiStrIndex i;
for (i = RL_LEN (pp); i; i--)
MSG2 ("%s ", fribidi_get_bidi_type_name (pp->type));
}
MSG ("\n");
}
static void
print_bidi_string (
/* input */
const FriBidiCharType *bidi_types,
const FriBidiStrIndex len
)
{
register FriBidiStrIndex i;
fribidi_assert (bidi_types);
MSG (" Org. types : ");
for (i = 0; i < len; i++)
MSG2 ("%s ", fribidi_get_bidi_type_name (bidi_types[i]));
MSG ("\n");
}
static void print_pairing_nodes(FriBidiPairingNode *nodes)
{
MSG ("Pairs: ");
while (nodes)
{
MSG3 ("(%d, %d) ", nodes->open->pos, nodes->close->pos);
nodes = nodes->next;
}
MSG ("\n");
}
#endif /* DEBUG */
/*=========================================================================
* define macros for push and pop the status in to / out of the stack
*-------------------------------------------------------------------------*/
/* There are a few little points in pushing into and popping from the status
stack:
1. when the embedding level is not valid (more than
FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL=125), you must reject it, and not to push
into the stack, but when you see a PDF, you must find the matching code,
and if it was pushed in the stack, pop it, it means you must pop if and
only if you have pushed the matching code, the over_pushed var counts the
number of rejected codes so far.
2. there's a more confusing point too, when the embedding level is exactly
FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL-1=124, an LRO, LRE, or LRI is rejected
because the new level would be FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL+1=126, that
is invalid; but an RLO, RLE, or RLI is accepted because the new level is
FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL=125, that is valid, so the rejected codes
may be not continuous in the logical order, in fact there are at most two
continuous intervals of codes, with an RLO, RLE, or RLI between them. To
support this case, the first_interval var counts the number of rejected
codes in the first interval, when it is 0, means that there is only one
interval.
*/
/* a. If this new level would be valid, then this embedding code is valid.
Remember (push) the current embedding level and override status.
Reset current level to this new level, and reset the override status to
new_override.
b. If the new level would not be valid, then this code is invalid. Don't
change the current level or override status.
*/
#define PUSH_STATUS \
FRIBIDI_BEGIN_STMT \
if LIKELY(over_pushed == 0 \
&& isolate_overflow == 0 \
&& new_level <= FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL) \
{ \
if UNLIKELY(level == FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL - 1) \
first_interval = over_pushed; \
status_stack[stack_size].level = level; \
status_stack[stack_size].isolate_level = isolate_level; \
status_stack[stack_size].isolate = isolate; \
status_stack[stack_size].override = override; \
stack_size++; \
level = new_level; \
override = new_override; \
} else if LIKELY(isolate_overflow == 0) \
over_pushed++; \
FRIBIDI_END_STMT
/* If there was a valid matching code, restore (pop) the last remembered
(pushed) embedding level and directional override.
*/
#define POP_STATUS \
FRIBIDI_BEGIN_STMT \
if (stack_size) \
{ \
if UNLIKELY(over_pushed > first_interval) \
over_pushed--; \
else \
{ \
if LIKELY(over_pushed == first_interval) \
first_interval = 0; \
stack_size--; \
level = status_stack[stack_size].level; \
override = status_stack[stack_size].override; \
isolate = status_stack[stack_size].isolate; \
isolate_level = status_stack[stack_size].isolate_level; \
} \
} \
FRIBIDI_END_STMT
/* Return the type of previous run or the SOR, if already at the start of
a level run. */
#define PREV_TYPE_OR_SOR(pp) \
( \
RL_LEVEL(pp->prev) == RL_LEVEL(pp) ? \
RL_TYPE(pp->prev) : \
FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(pp->prev), RL_LEVEL(pp))) \
)
/* Return the type of next run or the EOR, if already at the end of
a level run. */
#define NEXT_TYPE_OR_EOR(pp) \
( \
RL_LEVEL(pp->next) == RL_LEVEL(pp) ? \
RL_TYPE(pp->next) : \
FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(pp->next), RL_LEVEL(pp))) \
)
/* Return the embedding direction of a link. */
#define FRIBIDI_EMBEDDING_DIRECTION(link) \
FRIBIDI_LEVEL_TO_DIR(RL_LEVEL(link))
FRIBIDI_ENTRY FriBidiParType
fribidi_get_par_direction (
/* input */
const FriBidiCharType *bidi_types,
const FriBidiStrIndex len
)
{
register FriBidiStrIndex i;
fribidi_assert (bidi_types);
for (i = 0; i < len; i++)
if (FRIBIDI_IS_LETTER (bidi_types[i]))
return FRIBIDI_IS_RTL (bidi_types[i]) ? FRIBIDI_PAR_RTL :
FRIBIDI_PAR_LTR;
return FRIBIDI_PAR_ON;
}
/* Push a new entry to the pairing linked list */
static FriBidiPairingNode * pairing_nodes_push(FriBidiPairingNode *nodes,
FriBidiRun *open,
FriBidiRun *close)
{
FriBidiPairingNode *node = fribidi_malloc(sizeof(FriBidiPairingNode));
node->open = open;
node->close = close;
node->next = nodes;
nodes = node;
return nodes;
}
/* Sort by merge sort */
static void pairing_nodes_front_back_split(FriBidiPairingNode *source,
/* output */
FriBidiPairingNode **front,
FriBidiPairingNode **back)
{
FriBidiPairingNode *pfast, *pslow;
if (!source || !source->next)
{
*front = source;
*back = NULL;
}
else
{
pslow = source;
pfast = source->next;
while (pfast)
{
pfast= pfast->next;
if (pfast)
{
pfast = pfast->next;
pslow = pslow->next;
}
}
*front = source;
*back = pslow->next;
pslow->next = NULL;
}
}
static FriBidiPairingNode *
pairing_nodes_sorted_merge(FriBidiPairingNode *nodes1,
FriBidiPairingNode *nodes2)
{
FriBidiPairingNode *res = NULL;
if (!nodes1)
return nodes2;
if (!nodes2)
return nodes1;
if (nodes1->open->pos < nodes2->open->pos)
{
res = nodes1;
res->next = pairing_nodes_sorted_merge(nodes1->next, nodes2);
}
else
{
res = nodes2;
res->next = pairing_nodes_sorted_merge(nodes1, nodes2->next);
}
return res;
}
static void sort_pairing_nodes(FriBidiPairingNode **nodes)
{
FriBidiPairingNode *front, *back;
/* 0 or 1 node case */
if (!*nodes || !(*nodes)->next)
return;
pairing_nodes_front_back_split(*nodes, &front, &back);
sort_pairing_nodes(&front);
sort_pairing_nodes(&back);
*nodes = pairing_nodes_sorted_merge(front, back);
}
static void free_pairing_nodes(FriBidiPairingNode *nodes)
{
while (nodes)
{
FriBidiPairingNode *p = nodes;
nodes = nodes->next;
fribidi_free(p);
}
}
FRIBIDI_ENTRY FriBidiLevel
fribidi_get_par_embedding_levels_ex (
/* input */
const FriBidiCharType *bidi_types,
const FriBidiBracketType *bracket_types,
const FriBidiStrIndex len,
/* input and output */
FriBidiParType *pbase_dir,
/* output */
FriBidiLevel *embedding_levels
)
{
FriBidiLevel base_level_per_iso_level[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL];
FriBidiLevel base_level, max_level = 0;
FriBidiParType base_dir;
FriBidiRun *main_run_list = NULL, *explicits_list = NULL, *pp;
fribidi_boolean status = false;
int max_iso_level = 0;
if UNLIKELY
(!len)
{
status = true;
goto out;
}
DBG ("in fribidi_get_par_embedding_levels");
fribidi_assert (bidi_types);
fribidi_assert (pbase_dir);
fribidi_assert (embedding_levels);
/* Determinate character types */
{
/* Get run-length encoded character types */
main_run_list = run_list_encode_bidi_types (bidi_types, bracket_types, len);
if UNLIKELY
(!main_run_list) goto out;
}
/* Find base level */
/* If no strong base_dir was found, resort to the weak direction
that was passed on input. */
base_level = FRIBIDI_DIR_TO_LEVEL (*pbase_dir);
if (!FRIBIDI_IS_STRONG (*pbase_dir))
/* P2. P3. Search for first strong character and use its direction as
base direction */
{
int valid_isolate_count = 0;
for_run_list (pp, main_run_list)
{
if (RL_TYPE(pp) == FRIBIDI_TYPE_PDI)
{
/* Ignore if there is no matching isolate */
if (valid_isolate_count>0)
valid_isolate_count--;
}
else if (FRIBIDI_IS_ISOLATE(RL_TYPE(pp)))
valid_isolate_count++;
else if (valid_isolate_count==0 && FRIBIDI_IS_LETTER (RL_TYPE (pp)))
{
base_level = FRIBIDI_DIR_TO_LEVEL (RL_TYPE (pp));
*pbase_dir = FRIBIDI_LEVEL_TO_DIR (base_level);
break;
}
}
}
base_dir = FRIBIDI_LEVEL_TO_DIR (base_level);
DBG2 (" base level : %c", fribidi_char_from_level (base_level));
DBG2 (" base dir : %s", fribidi_get_bidi_type_name (base_dir));
base_level_per_iso_level[0] = base_level;
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
}
# endif /* DEBUG */
/* Explicit Levels and Directions */
DBG ("explicit levels and directions");
{
FriBidiLevel level, new_level = 0;
int isolate_level = 0;
FriBidiCharType override, new_override;
FriBidiStrIndex i;
int stack_size, over_pushed, first_interval;
int valid_isolate_count = 0;
int isolate_overflow = 0;
int isolate = 0; /* The isolate status flag */
struct
{
FriBidiCharType override; /* only LTR, RTL and ON are valid */
FriBidiLevel level;
int isolate;
int isolate_level;
} status_stack[FRIBIDI_BIDI_MAX_RESOLVED_LEVELS];
FriBidiRun temp_link;
FriBidiRun *run_per_isolate_level[FRIBIDI_BIDI_MAX_RESOLVED_LEVELS];
memset(run_per_isolate_level, 0, sizeof(run_per_isolate_level[0])
* FRIBIDI_BIDI_MAX_RESOLVED_LEVELS);
/* explicits_list is a list like main_run_list, that holds the explicit
codes that are removed from main_run_list, to reinsert them later by
calling the shadow_run_list.
*/
explicits_list = new_run_list ();
if UNLIKELY
(!explicits_list) goto out;
/* X1. Begin by setting the current embedding level to the paragraph
embedding level. Set the directional override status to neutral,
and directional isolate status to false.
Process each character iteratively, applying rules X2 through X8.
Only embedding levels from 0 to 123 are valid in this phase. */
level = base_level;
override = FRIBIDI_TYPE_ON;
/* stack */
stack_size = 0;
over_pushed = 0;
first_interval = 0;
valid_isolate_count = 0;
isolate_overflow = 0;
for_run_list (pp, main_run_list)
{
FriBidiCharType this_type = RL_TYPE (pp);
RL_ISOLATE_LEVEL (pp) = isolate_level;
if (FRIBIDI_IS_EXPLICIT_OR_BN (this_type))
{
if (FRIBIDI_IS_STRONG (this_type))
{ /* LRE, RLE, LRO, RLO */
/* 1. Explicit Embeddings */
/* X2. With each RLE, compute the least greater odd
embedding level. */
/* X3. With each LRE, compute the least greater even
embedding level. */
/* 2. Explicit Overrides */
/* X4. With each RLO, compute the least greater odd
embedding level. */
/* X5. With each LRO, compute the least greater even
embedding level. */
new_override = FRIBIDI_EXPLICIT_TO_OVERRIDE_DIR (this_type);
for (i = RL_LEN (pp); i; i--)
{
new_level =
((level + FRIBIDI_DIR_TO_LEVEL (this_type) + 2) & ~1) -
FRIBIDI_DIR_TO_LEVEL (this_type);
isolate = 0;
PUSH_STATUS;
}
}
else if (this_type == FRIBIDI_TYPE_PDF)
{
/* 3. Terminating Embeddings and overrides */
/* X7. With each PDF, determine the matching embedding or
override code. */
for (i = RL_LEN (pp); i; i--)
{
if (stack_size && status_stack[stack_size-1].isolate != 0)
break;
POP_STATUS;
}
}
/* X9. Remove all RLE, LRE, RLO, LRO, PDF, and BN codes. */
/* Remove element and add it to explicits_list */
RL_LEVEL (pp) = FRIBIDI_SENTINEL;
temp_link.next = pp->next;
move_node_before (pp, explicits_list);
pp = &temp_link;
}
else if (this_type == FRIBIDI_TYPE_PDI)
/* X6a. pop the direction of the stack */
{
for (i = RL_LEN (pp); i; i--)
{
if (isolate_overflow > 0)
{
isolate_overflow--;
RL_LEVEL (pp) = level;
}
else if (valid_isolate_count > 0)
{
/* Pop away all LRE,RLE,LRO, RLO levels
from the stack, as these are implicitly
terminated by the PDI */
while (stack_size && !status_stack[stack_size-1].isolate)
POP_STATUS;
over_pushed = 0; /* The PDI resets the overpushed! */
POP_STATUS;
isolate_level-- ;
valid_isolate_count--;
RL_LEVEL (pp) = level;
RL_ISOLATE_LEVEL (pp) = isolate_level;
}
else
{
/* Ignore isolated PDI's by turning them into ON's */
RL_TYPE (pp) = FRIBIDI_TYPE_ON;
RL_LEVEL (pp) = level;
}
}
}
else if (FRIBIDI_IS_ISOLATE(this_type))
{
/* TBD support RL_LEN > 1 */
new_override = FRIBIDI_TYPE_ON;
isolate = 1;
if (this_type == FRIBIDI_TYPE_LRI)
new_level = level + 2 - (level%2);
else if (this_type == FRIBIDI_TYPE_RLI)
new_level = level + 1 + (level%2);
else if (this_type == FRIBIDI_TYPE_FSI)
{
/* Search for a local strong character until we
meet the corresponding PDI or the end of the
paragraph */
FriBidiRun *fsi_pp;
int isolate_count = 0;
int fsi_base_level = 0;
for_run_list (fsi_pp, pp)
{
if (RL_TYPE(fsi_pp) == FRIBIDI_TYPE_PDI)
{
isolate_count--;
if (valid_isolate_count < 0)
break;
}
else if (FRIBIDI_IS_ISOLATE(RL_TYPE(fsi_pp)))
isolate_count++;
else if (isolate_count==0 && FRIBIDI_IS_LETTER (RL_TYPE (fsi_pp)))
{
fsi_base_level = FRIBIDI_DIR_TO_LEVEL (RL_TYPE (fsi_pp));
break;
}
}
/* Same behavior like RLI and LRI above */
if (FRIBIDI_LEVEL_IS_RTL (fsi_base_level))
new_level = level + 1 + (level%2);
else
new_level = level + 2 - (level%2);
}
RL_LEVEL (pp) = level;
RL_ISOLATE_LEVEL (pp) = isolate_level++;
base_level_per_iso_level[isolate_level] = new_level;
if (!FRIBIDI_IS_NEUTRAL (override))
RL_TYPE (pp) = override;
if (new_level <= FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL)
{
valid_isolate_count++;
PUSH_STATUS;
level = new_level;
}
else
isolate_overflow += 1;
}
else if (this_type == FRIBIDI_TYPE_BS)
{
/* X8. All explicit directional embeddings and overrides are
completely terminated at the end of each paragraph. Paragraph
separators are not included in the embedding. */
break;
}
else
{
/* X6. For all types besides RLE, LRE, RLO, LRO, and PDF:
a. Set the level of the current character to the current
embedding level.
b. Whenever the directional override status is not neutral,
reset the current character type to the directional override
status. */
RL_LEVEL (pp) = level;
if (!FRIBIDI_IS_NEUTRAL (override))
RL_TYPE (pp) = override;
}
}
/* Build the isolate_level connections */
for_run_list (pp, main_run_list)
{
int isolate_level = RL_ISOLATE_LEVEL (pp);
if (run_per_isolate_level[isolate_level])
{
run_per_isolate_level[isolate_level]->next_isolate = pp;
pp->prev_isolate = run_per_isolate_level[isolate_level];
}
run_per_isolate_level[isolate_level] = pp;
}
/* Implementing X8. It has no effect on a single paragraph! */
level = base_level;
override = FRIBIDI_TYPE_ON;
stack_size = 0;
over_pushed = 0;
}
/* X10. The remaining rules are applied to each run of characters at the
same level. For each run, determine the start-of-level-run (sor) and
end-of-level-run (eor) type, either L or R. This depends on the
higher of the two levels on either side of the boundary (at the start
or end of the paragraph, the level of the 'other' run is the base
embedding level). If the higher level is odd, the type is R, otherwise
it is L. */
/* Resolving Implicit Levels can be done out of X10 loop, so only change
of Resolving Weak Types and Resolving Neutral Types is needed. */
compact_list (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
print_bidi_string (bidi_types, len);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* 4. Resolving weak types. Also calculate the maximum isolate level */
max_iso_level = 0;
DBG ("resolving weak types");
{
int last_strong_stack[FRIBIDI_BIDI_MAX_RESOLVED_LEVELS];
FriBidiCharType prev_type_orig;
fribidi_boolean w4;
last_strong_stack[0] = base_dir;
for_run_list (pp, main_run_list)
{
register FriBidiCharType prev_type, this_type, next_type;
FriBidiRun *ppp_prev, *ppp_next;
int iso_level;
ppp_prev = get_adjacent_run(pp, false, false);
ppp_next = get_adjacent_run(pp, true, false);
this_type = RL_TYPE (pp);
iso_level = RL_ISOLATE_LEVEL(pp);
if (iso_level > max_iso_level)
max_iso_level = iso_level;
if (RL_LEVEL(ppp_prev) == RL_LEVEL(pp))
prev_type = RL_TYPE(ppp_prev);
else
prev_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_prev), RL_LEVEL(pp)));
if (RL_LEVEL(ppp_next) == RL_LEVEL(pp))
next_type = RL_TYPE(ppp_next);
else
next_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_next), RL_LEVEL(pp)));
if (FRIBIDI_IS_STRONG (prev_type))
last_strong_stack[iso_level] = prev_type;
/* W1. NSM
Examine each non-spacing mark (NSM) in the level run, and change the
type of the NSM to the type of the previous character. If the NSM
is at the start of the level run, it will get the type of sor. */
/* Implementation note: it is important that if the previous character
is not sor, then we should merge this run with the previous,
because of rules like W5, that we assume all of a sequence of
adjacent ETs are in one FriBidiRun. */
if (this_type == FRIBIDI_TYPE_NSM)
{
/* New rule in Unicode 6.3 */
if (FRIBIDI_IS_ISOLATE (RL_TYPE (pp->prev)))
RL_TYPE(pp) = FRIBIDI_TYPE_ON;
if (RL_LEVEL (ppp_prev) == RL_LEVEL (pp))
{
if (ppp_prev == pp->prev)
pp = merge_with_prev (pp);
}
else
RL_TYPE (pp) = prev_type;
if (prev_type == next_type && RL_LEVEL (pp) == RL_LEVEL (pp->next))
{
if (ppp_next == pp->next)
pp = merge_with_prev (pp->next);
}
continue; /* As we know the next condition cannot be true. */
}
/* W2: European numbers. */
if (this_type == FRIBIDI_TYPE_EN && last_strong_stack[iso_level] == FRIBIDI_TYPE_AL)
{
RL_TYPE (pp) = FRIBIDI_TYPE_AN;
/* Resolving dependency of loops for rules W1 and W2, so we
can merge them in one loop. */
if (next_type == FRIBIDI_TYPE_NSM)
RL_TYPE (ppp_next) = FRIBIDI_TYPE_AN;
}
}
last_strong_stack[0] = base_dir;
/* Resolving dependency of loops for rules W4 and W5, W5 may
want to prevent W4 to take effect in the next turn, do this
through "w4". */
w4 = true;
/* Resolving dependency of loops for rules W4 and W5 with W7,
W7 may change an EN to L but it sets the prev_type_orig if needed,
so W4 and W5 in next turn can still do their works. */
prev_type_orig = FRIBIDI_TYPE_ON;
/* Each isolate level has its own memory of the last strong character */
for_run_list (pp, main_run_list)
{
register FriBidiCharType prev_type, this_type, next_type;
int iso_level;
FriBidiRun *ppp_prev, *ppp_next;
this_type = RL_TYPE (pp);
iso_level = RL_ISOLATE_LEVEL(pp);
ppp_prev = get_adjacent_run(pp, false, false);
ppp_next = get_adjacent_run(pp, true, false);
if (RL_LEVEL(ppp_prev) == RL_LEVEL(pp))
prev_type = RL_TYPE(ppp_prev);
else
prev_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_prev), RL_LEVEL(pp)));
if (RL_LEVEL(ppp_next) == RL_LEVEL(pp))
next_type = RL_TYPE(ppp_next);
else
next_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_next), RL_LEVEL(pp)));
if (FRIBIDI_IS_STRONG (prev_type))
last_strong_stack[iso_level] = prev_type;
/* W2 ??? */
/* W3: Change ALs to R. */
if (this_type == FRIBIDI_TYPE_AL)
{
RL_TYPE (pp) = FRIBIDI_TYPE_RTL;
w4 = true;
prev_type_orig = FRIBIDI_TYPE_ON;
continue;
}
/* W4. A single european separator changes to a european number.
A single common separator between two numbers of the same type
changes to that type. */
if (w4
&& RL_LEN (pp) == 1 && FRIBIDI_IS_ES_OR_CS (this_type)
&& FRIBIDI_IS_NUMBER (prev_type_orig)
&& prev_type_orig == next_type
&& (prev_type_orig == FRIBIDI_TYPE_EN
|| this_type == FRIBIDI_TYPE_CS))
{
RL_TYPE (pp) = prev_type;
this_type = RL_TYPE (pp);
}
w4 = true;
/* W5. A sequence of European terminators adjacent to European
numbers changes to All European numbers. */
if (this_type == FRIBIDI_TYPE_ET
&& (prev_type_orig == FRIBIDI_TYPE_EN
|| next_type == FRIBIDI_TYPE_EN))
{
RL_TYPE (pp) = FRIBIDI_TYPE_EN;
w4 = false;
this_type = RL_TYPE (pp);
}
/* W6. Otherwise change separators and terminators to other neutral. */
if (FRIBIDI_IS_NUMBER_SEPARATOR_OR_TERMINATOR (this_type))
RL_TYPE (pp) = FRIBIDI_TYPE_ON;
/* W7. Change european numbers to L. */
if (this_type == FRIBIDI_TYPE_EN && last_strong_stack[iso_level] == FRIBIDI_TYPE_LTR)
{
RL_TYPE (pp) = FRIBIDI_TYPE_LTR;
prev_type_orig = (RL_LEVEL (pp) == RL_LEVEL (pp->next) ?
FRIBIDI_TYPE_EN : FRIBIDI_TYPE_ON);
}
else
prev_type_orig = PREV_TYPE_OR_SOR (pp->next);
}
}
compact_neutrals (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* 5. Resolving Neutral Types */
DBG ("resolving neutral types - N0");
{
/* BD16 - Build list of all pairs*/
int num_iso_levels = max_iso_level + 1;
FriBidiPairingNode *pairing_nodes = NULL;
FriBidiRun *local_bracket_stack[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL][LOCAL_BRACKET_SIZE];
FriBidiRun **bracket_stack[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL];
int bracket_stack_size[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL];
int last_level = RL_LEVEL(main_run_list);
int last_iso_level = 0;
memset(bracket_stack, 0, sizeof(bracket_stack[0])*num_iso_levels);
memset(bracket_stack_size, 0, sizeof(bracket_stack_size[0])*num_iso_levels);
/* populate the bracket_size. The first LOCAL_BRACKET_SIZE entries
of the stack are one the stack. Allocate the rest of the entries.
*/
{
int iso_level;
for (iso_level=0; iso_level < LOCAL_BRACKET_SIZE; iso_level++)
bracket_stack[iso_level] = local_bracket_stack[iso_level];
for (iso_level=LOCAL_BRACKET_SIZE; iso_level < num_iso_levels; iso_level++)
bracket_stack[iso_level] = fribidi_malloc (sizeof (bracket_stack[0])
* FRIBIDI_BIDI_MAX_NESTED_BRACKET_PAIRS);
}
/* Build the bd16 pair stack. */
for_run_list (pp, main_run_list)
{
int level = RL_LEVEL(pp);
int iso_level = RL_ISOLATE_LEVEL(pp);
FriBidiBracketType brack_prop = RL_BRACKET_TYPE(pp);
/* Interpret the isolating run sequence as such that they
end at a change in the level, unless the iso_level has been
raised. */
if (level != last_level && last_iso_level == iso_level)
bracket_stack_size[last_iso_level] = 0;
if (brack_prop!= FRIBIDI_NO_BRACKET
&& RL_TYPE(pp)==FRIBIDI_TYPE_ON)
{
if (FRIBIDI_IS_BRACKET_OPEN(brack_prop))
{
if (bracket_stack_size[iso_level]==FRIBIDI_BIDI_MAX_NESTED_BRACKET_PAIRS)
break;
/* push onto the pair stack */
bracket_stack[iso_level][bracket_stack_size[iso_level]++] = pp;
}
else
{
int stack_idx = bracket_stack_size[iso_level] - 1;
while (stack_idx >= 0)
{
FriBidiBracketType se_brack_prop = RL_BRACKET_TYPE(bracket_stack[iso_level][stack_idx]);
if (FRIBIDI_BRACKET_ID(se_brack_prop) == FRIBIDI_BRACKET_ID(brack_prop))
{
bracket_stack_size[iso_level] = stack_idx;
pairing_nodes = pairing_nodes_push(pairing_nodes,
bracket_stack[iso_level][stack_idx],
pp);
break;
}
stack_idx--;
}
}
}
last_level = level;
last_iso_level = iso_level;
}
/* The list must now be sorted for the next algo to work! */
sort_pairing_nodes(&pairing_nodes);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_pairing_nodes (pairing_nodes);
}
# endif /* DEBUG */
/* Start the N0 */
{
FriBidiPairingNode *ppairs = pairing_nodes;
while (ppairs)
{
int iso_level = ppairs->open->isolate_level;
int embedding_level = base_level_per_iso_level[iso_level];
/* Find matching strong. */
fribidi_boolean found = false;
FriBidiRun *ppn;
for (ppn = ppairs->open; ppn!= ppairs->close; ppn = ppn->next)
{
FriBidiCharType this_type = RL_TYPE_AN_EN_AS_RTL(ppn);
/* Calculate level like in resolve implicit levels below to prevent
embedded levels not to match the base_level */
int this_level = RL_LEVEL (ppn) +
(FRIBIDI_LEVEL_IS_RTL (RL_LEVEL(ppn)) ^ FRIBIDI_DIR_TO_LEVEL (this_type));
/* N0b */
if (FRIBIDI_IS_STRONG (this_type) && this_level == embedding_level)
{
RL_TYPE(ppairs->open) = RL_TYPE(ppairs->close) = this_level%2 ? FRIBIDI_TYPE_RTL : FRIBIDI_TYPE_LTR;
found = true;
break;
}
}
/* N0c */
/* Search for any strong type preceding and within the bracket pair */
if (!found)
{
/* Search for a preceding strong */
int prec_strong_level = embedding_level; /* TBDov! Extract from Isolate level in effect */
int iso_level = RL_ISOLATE_LEVEL(ppairs->open);
for (ppn = ppairs->open->prev; ppn->type != FRIBIDI_TYPE_SENTINEL; ppn=ppn->prev)
{
FriBidiCharType this_type = RL_TYPE_AN_EN_AS_RTL(ppn);
if (FRIBIDI_IS_STRONG (this_type) && RL_ISOLATE_LEVEL(ppn) == iso_level)
{
prec_strong_level = RL_LEVEL (ppn) +
(FRIBIDI_LEVEL_IS_RTL (RL_LEVEL(ppn)) ^ FRIBIDI_DIR_TO_LEVEL (this_type));
break;
}
}
for (ppn = ppairs->open; ppn!= ppairs->close; ppn = ppn->next)
{
FriBidiCharType this_type = RL_TYPE_AN_EN_AS_RTL(ppn);
if (FRIBIDI_IS_STRONG (this_type) && RL_ISOLATE_LEVEL(ppn) == iso_level)
{
/* By constraint this is opposite the embedding direction,
since we did not match the N0b rule. We must now
compare with the preceding strong to establish whether
to apply N0c1 (opposite) or N0c2 embedding */
RL_TYPE(ppairs->open) = RL_TYPE(ppairs->close) = prec_strong_level % 2 ? FRIBIDI_TYPE_RTL : FRIBIDI_TYPE_LTR;
RL_LEVEL(ppairs->open) = RL_LEVEL(ppairs->close) = prec_strong_level;
found = true;
break;
}
}
}
ppairs = ppairs->next;
}
free_pairing_nodes(pairing_nodes);
if (num_iso_levels >= LOCAL_BRACKET_SIZE)
{
int i;
/* Only need to free the non static members */
for (i=LOCAL_BRACKET_SIZE; i<num_iso_levels; i++)
fribidi_free(bracket_stack[i]);
}
/* Remove the bracket property and re-compact */
{
const FriBidiBracketType NoBracket = FRIBIDI_NO_BRACKET;
for_run_list (pp, main_run_list)
pp->bracket_type = NoBracket;
compact_neutrals (main_run_list);
}
}
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
}
DBG ("resolving neutral types - N1+N2");
{
for_run_list (pp, main_run_list)
{
FriBidiCharType prev_type, this_type, next_type;
FriBidiRun *ppp_prev, *ppp_next;
ppp_prev = get_adjacent_run(pp, false, false);
ppp_next = get_adjacent_run(pp, true, false);
/* "European and Arabic numbers are treated as though they were R"
FRIBIDI_CHANGE_NUMBER_TO_RTL does this. */
this_type = FRIBIDI_CHANGE_NUMBER_TO_RTL (RL_TYPE (pp));
if (RL_LEVEL(ppp_prev) == RL_LEVEL(pp))
prev_type = FRIBIDI_CHANGE_NUMBER_TO_RTL (RL_TYPE(ppp_prev));
else
prev_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_prev), RL_LEVEL(pp)));
if (RL_LEVEL(ppp_next) == RL_LEVEL(pp))
next_type = FRIBIDI_CHANGE_NUMBER_TO_RTL (RL_TYPE(ppp_next));
else
next_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_next), RL_LEVEL(pp)));
if (FRIBIDI_IS_NEUTRAL (this_type))
RL_TYPE (pp) = (prev_type == next_type) ?
/* N1. */ prev_type :
/* N2. */ FRIBIDI_EMBEDDING_DIRECTION (pp);
}
}
compact_list (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* 6. Resolving implicit levels */
DBG ("resolving implicit levels");
{
max_level = base_level;
for_run_list (pp, main_run_list)
{
FriBidiCharType this_type;
int level;
this_type = RL_TYPE (pp);
level = RL_LEVEL (pp);
/* I1. Even */
/* I2. Odd */
if (FRIBIDI_IS_NUMBER (this_type))
RL_LEVEL (pp) = (level + 2) & ~1;
else
RL_LEVEL (pp) =
level +
(FRIBIDI_LEVEL_IS_RTL (level) ^ FRIBIDI_DIR_TO_LEVEL (this_type));
if (RL_LEVEL (pp) > max_level)
max_level = RL_LEVEL (pp);
}
}
compact_list (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_bidi_string (bidi_types, len);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* Reinsert the explicit codes & BN's that are already removed, from the
explicits_list to main_run_list. */
DBG ("reinserting explicit codes");
if UNLIKELY
(explicits_list->next != explicits_list)
{
register FriBidiRun *p;
register fribidi_boolean stat =
shadow_run_list (main_run_list, explicits_list, true);
explicits_list = NULL;
if UNLIKELY
(!stat) goto out;
/* Set level of inserted explicit chars to that of their previous
* char, such that they do not affect reordering. */
p = main_run_list->next;
if (p != main_run_list && p->level == FRIBIDI_SENTINEL)
p->level = base_level;
for_run_list (p, main_run_list) if (p->level == FRIBIDI_SENTINEL)
p->level = p->prev->level;
}
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
DBG ("reset the embedding levels, 1, 2, 3.");
{
register int j, state, pos;
register FriBidiCharType char_type;
register FriBidiRun *p, *q, *list;
/* L1. Reset the embedding levels of some chars:
1. segment separators,
2. paragraph separators,
3. any sequence of whitespace characters preceding a segment
separator or paragraph separator, and
4. any sequence of whitespace characters and/or isolate formatting
characters at the end of the line.
... (to be continued in fribidi_reorder_line()). */
list = new_run_list ();
if UNLIKELY
(!list) goto out;
q = list;
state = 1;
pos = len - 1;
for (j = len - 1; j >= -1; j--)
{
/* close up the open link at the end */
if (j >= 0)
char_type = bidi_types[j];
else
char_type = FRIBIDI_TYPE_ON;
if (!state && FRIBIDI_IS_SEPARATOR (char_type))
{
state = 1;
pos = j;
}
else if (state &&
!(FRIBIDI_IS_EXPLICIT_OR_SEPARATOR_OR_BN_OR_WS(char_type)
|| FRIBIDI_IS_ISOLATE(char_type)))
{
state = 0;
p = new_run ();
if UNLIKELY
(!p)
{
free_run_list (list);
goto out;
}
p->pos = j + 1;
p->len = pos - j;
p->type = base_dir;
p->level = base_level;
move_node_before (p, q);
q = p;
}
}
if UNLIKELY
(!shadow_run_list (main_run_list, list, false)) goto out;
}
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
{
FriBidiStrIndex pos = 0;
for_run_list (pp, main_run_list)
{
register FriBidiStrIndex l;
register FriBidiLevel level = pp->level;
for (l = pp->len; l; l--)
embedding_levels[pos++] = level;
}
}
status = true;
out:
DBG ("leaving fribidi_get_par_embedding_levels");
if (main_run_list)
free_run_list (main_run_list);
if UNLIKELY
(explicits_list) free_run_list (explicits_list);
return status ? max_level + 1 : 0;
}
static void
bidi_string_reverse (
FriBidiChar *str,
const FriBidiStrIndex len
)
{
FriBidiStrIndex i;
fribidi_assert (str);
for (i = 0; i < len / 2; i++)
{
FriBidiChar tmp = str[i];
str[i] = str[len - 1 - i];
str[len - 1 - i] = tmp;
}
}
static void
index_array_reverse (
FriBidiStrIndex *arr,
const FriBidiStrIndex len
)
{
FriBidiStrIndex i;
fribidi_assert (arr);
for (i = 0; i < len / 2; i++)
{
FriBidiStrIndex tmp = arr[i];
arr[i] = arr[len - 1 - i];
arr[len - 1 - i] = tmp;
}
}
FRIBIDI_ENTRY FriBidiLevel
fribidi_reorder_line (
/* input */
FriBidiFlags flags, /* reorder flags */
const FriBidiCharType *bidi_types,
const FriBidiStrIndex len,
const FriBidiStrIndex off,
const FriBidiParType base_dir,
/* input and output */
FriBidiLevel *embedding_levels,
FriBidiChar *visual_str,
/* output */
FriBidiStrIndex *map
)
{
fribidi_boolean status = false;
FriBidiLevel max_level = 0;
if UNLIKELY
(len == 0)
{
status = true;
goto out;
}
DBG ("in fribidi_reorder_line");
fribidi_assert (bidi_types);
fribidi_assert (embedding_levels);
DBG ("reset the embedding levels, 4. whitespace at the end of line");
{
register FriBidiStrIndex i;
/* L1. Reset the embedding levels of some chars:
4. any sequence of white space characters at the end of the line. */
for (i = off + len - 1; i >= off &&
FRIBIDI_IS_EXPLICIT_OR_BN_OR_WS (bidi_types[i]); i--)
embedding_levels[i] = FRIBIDI_DIR_TO_LEVEL (base_dir);
}
/* 7. Reordering resolved levels */
{
register FriBidiLevel level;
register FriBidiStrIndex i;
/* Reorder both the outstring and the order array */
{
if (FRIBIDI_TEST_BITS (flags, FRIBIDI_FLAG_REORDER_NSM))
{
/* L3. Reorder NSMs. */
for (i = off + len - 1; i >= off; i--)
if (FRIBIDI_LEVEL_IS_RTL (embedding_levels[i])
&& bidi_types[i] == FRIBIDI_TYPE_NSM)
{
register FriBidiStrIndex seq_end = i;
level = embedding_levels[i];
for (i--; i >= off &&
FRIBIDI_IS_EXPLICIT_OR_BN_OR_NSM (bidi_types[i])
&& embedding_levels[i] == level; i--)
;
if (i < off || embedding_levels[i] != level)
{
i++;
DBG ("warning: NSM(s) at the beginning of level run");
}
if (visual_str)
{
bidi_string_reverse (visual_str + i, seq_end - i + 1);
}
if (map)
{
index_array_reverse (map + i, seq_end - i + 1);
}
}
}
/* Find max_level of the line. We don't reuse the paragraph
* max_level, both for a cleaner API, and that the line max_level
* may be far less than paragraph max_level. */
for (i = off + len - 1; i >= off; i--)
if (embedding_levels[i] > max_level)
max_level = embedding_levels[i];
/* L2. Reorder. */
for (level = max_level; level > 0; level--)
for (i = off + len - 1; i >= off; i--)
if (embedding_levels[i] >= level)
{
/* Find all stretches that are >= level_idx */
register FriBidiStrIndex seq_end = i;
for (i--; i >= off && embedding_levels[i] >= level; i--)
;
if (visual_str)
bidi_string_reverse (visual_str + i + 1, seq_end - i);
if (map)
index_array_reverse (map + i + 1, seq_end - i);
}
}
}
status = true;
out:
return status ? max_level + 1 : 0;
}
/* Editor directions:
* vim:textwidth=78:tabstop=8:shiftwidth=2:autoindent:cindent
*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_1199_0 |
crossvul-cpp_data_bad_3927_2 | /*!
* \file lr1110-se.c
*
* \brief LR1110 Secure Element hardware implementation
*
* \copyright Revised BSD License, see section \ref LICENSE.
*
* \code
* ______ _
* / _____) _ | |
* ( (____ _____ ____ _| |_ _____ ____| |__
* \____ \| ___ | (_ _) ___ |/ ___) _ \
* _____) ) ____| | | || |_| ____( (___| | | |
* (______/|_____)_|_|_| \__)_____)\____)_| |_|
* (C)2019-2019 Semtech
*
* \endcode
*
* \authors Semtech WSP Applications Team
*/
#include <stdlib.h>
#include <stdint.h>
#include "lr1110.h"
#include "lr1110_system.h"
#include "lr1110_crypto_engine.h"
#include "secure-element.h"
#include "se-identity.h"
#include "lr1110-se-hal.h"
/*!
* Number of supported crypto keys
*/
#define NUM_OF_KEYS 23
/*
* CMAC/AES Message Integrity Code (MIC) Block B0 size
*/
#define MIC_BLOCK_BX_SIZE 16
/*
* Maximum size of the message that can be handled by the crypto operations
*/
#define CRYPTO_MAXMESSAGE_SIZE 256
/*
* Maximum size of the buffer for crypto operations
*/
#define CRYPTO_BUFFER_SIZE CRYPTO_MAXMESSAGE_SIZE + MIC_BLOCK_BX_SIZE
/*!
* Secure-element LoRaWAN identity local storage.
*/
typedef struct sSecureElementNvCtx
{
/*
* DevEUI storage
*/
uint8_t DevEui[SE_EUI_SIZE];
/*
* Join EUI storage
*/
uint8_t JoinEui[SE_EUI_SIZE];
/*
* PIN of the LR1110
*/
uint8_t Pin[SE_PIN_SIZE];
} SecureElementNvCtx_t;
static SecureElementNvCtx_t SeContext = {
/*!
* end-device IEEE EUI (big endian)
*
* \remark In this application the value is automatically generated by calling
* BoardGetUniqueId function
*/
.DevEui = LORAWAN_DEVICE_EUI,
/*!
* App/Join server IEEE EUI (big endian)
*/
.JoinEui = LORAWAN_JOIN_EUI,
/*!
* Secure-element pin (big endian)
*/
.Pin = SECURE_ELEMENT_PIN,
};
static SecureElementNvmEvent SeNvmCtxChanged;
/*!
* LR1110 radio context
*/
extern lr1110_t LR1110;
/*!
* Converts key ids from SecureElement to LR1110
*
* \param [IN] key_id SecureElement key id to be converted
*
* \retval key_id Converted LR1110 key id
*/
static lr1110_crypto_keys_idx_t convert_key_id_from_se_to_lr1110( KeyIdentifier_t key_id );
/*!
* Dummy callback in case if the user provides NULL function pointer
*/
static void DummyCB( void )
{
return;
}
SecureElementStatus_t SecureElementInit( SecureElementNvmEvent seNvmCtxChanged )
{
lr1110_crypto_status_t status = LR1110_CRYPTO_STATUS_ERROR;
// Assign callback
if( seNvmCtxChanged != 0 )
{
SeNvmCtxChanged = seNvmCtxChanged;
}
else
{
SeNvmCtxChanged = DummyCB;
}
lr1110_crypto_restore_from_flash( &LR1110, &status );
#if defined( SECURE_ELEMENT_PRE_PROVISIONED )
// Read LR1110 pre-provisioned identity
lr1110_system_read_uid( &LR1110, SeContext.DevEui );
lr1110_system_read_join_eui( &LR1110, SeContext.JoinEui );
lr1110_system_read_pin( &LR1110, SeContext.Pin );
#else
#if( STATIC_DEVICE_EUI == 0 )
// Get a DevEUI from MCU unique ID
LR1110SeHalGetUniqueId( SeContext.DevEui );
#endif
#endif
SeNvmCtxChanged( );
return ( SecureElementStatus_t ) status;
}
SecureElementStatus_t SecureElementRestoreNvmCtx( void* seNvmCtx )
{
lr1110_crypto_status_t status = LR1110_CRYPTO_STATUS_ERROR;
if( seNvmCtx == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Restore lr1110 crypto context
lr1110_crypto_restore_from_flash( &LR1110, &status );
// Restore nvm context
memcpy1( ( uint8_t* ) &SeContext, ( uint8_t* ) seNvmCtx, sizeof( SeContext ) );
return ( SecureElementStatus_t ) status;
}
void* SecureElementGetNvmCtx( size_t* seNvmCtxSize )
{
*seNvmCtxSize = sizeof( SeContext );
return &SeContext;
}
SecureElementStatus_t SecureElementSetKey( KeyIdentifier_t keyID, uint8_t* key )
{
if( key == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
SecureElementStatus_t status = SECURE_ELEMENT_ERROR;
if( ( keyID == MC_KEY_0 ) || ( keyID == MC_KEY_1 ) || ( keyID == MC_KEY_2 ) || ( keyID == MC_KEY_3 ) )
{ // Decrypt the key if its a Mckey
lr1110_crypto_derive_and_store_key( &LR1110, ( lr1110_crypto_status_t* ) &status,
convert_key_id_from_se_to_lr1110( MC_KE_KEY ),
convert_key_id_from_se_to_lr1110( keyID ), key );
if( status == SECURE_ELEMENT_SUCCESS )
{
lr1110_crypto_store_to_flash( &LR1110, ( lr1110_crypto_status_t* ) &status );
}
return status;
}
else
{
lr1110_crypto_set_key( &LR1110, ( lr1110_crypto_status_t* ) &status, convert_key_id_from_se_to_lr1110( keyID ),
key );
if( status == SECURE_ELEMENT_SUCCESS )
{
lr1110_crypto_store_to_flash( &LR1110, ( lr1110_crypto_status_t* ) &status );
}
return status;
}
}
SecureElementStatus_t SecureElementComputeAesCmac( uint8_t* micBxBuffer, uint8_t* buffer, uint16_t size,
KeyIdentifier_t keyID, uint32_t* cmac )
{
SecureElementStatus_t status = SECURE_ELEMENT_ERROR;
uint16_t localSize = size;
uint8_t* localbuffer = buffer;
if( micBxBuffer != NULL )
{
uint8_t micBuff[CRYPTO_BUFFER_SIZE];
memset1( micBuff, 0, CRYPTO_BUFFER_SIZE );
memcpy1( micBuff, micBxBuffer, MIC_BLOCK_BX_SIZE );
memcpy1( ( micBuff + MIC_BLOCK_BX_SIZE ), buffer, size );
localSize += MIC_BLOCK_BX_SIZE;
localbuffer = micBuff;
}
lr1110_crypto_compute_aes_cmac( &LR1110, ( lr1110_crypto_status_t* ) &status,
convert_key_id_from_se_to_lr1110( keyID ), localbuffer, localSize,
( uint8_t* ) cmac );
return status;
}
SecureElementStatus_t SecureElementVerifyAesCmac( uint8_t* buffer, uint16_t size, uint32_t expectedCmac,
KeyIdentifier_t keyID )
{
SecureElementStatus_t status = SECURE_ELEMENT_ERROR;
if( buffer == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
lr1110_crypto_verify_aes_cmac( &LR1110, ( lr1110_crypto_status_t* ) &status,
convert_key_id_from_se_to_lr1110( keyID ), buffer, size,
( uint8_t* ) &expectedCmac );
return status;
}
SecureElementStatus_t SecureElementAesEncrypt( uint8_t* buffer, uint16_t size, KeyIdentifier_t keyID,
uint8_t* encBuffer )
{
SecureElementStatus_t status = SECURE_ELEMENT_ERROR;
if( ( buffer == NULL ) || ( encBuffer == NULL ) )
{
return SECURE_ELEMENT_ERROR_NPE;
}
lr1110_crypto_aes_encrypt_01( &LR1110, ( lr1110_crypto_status_t* ) &status,
convert_key_id_from_se_to_lr1110( keyID ), buffer, size, encBuffer );
return status;
}
SecureElementStatus_t SecureElementDeriveAndStoreKey( Version_t version, uint8_t* input, KeyIdentifier_t rootKeyID,
KeyIdentifier_t targetKeyID )
{
SecureElementStatus_t status = SECURE_ELEMENT_ERROR;
if( input == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
lr1110_crypto_derive_and_store_key( &LR1110, ( lr1110_crypto_status_t* ) &status,
convert_key_id_from_se_to_lr1110( rootKeyID ),
convert_key_id_from_se_to_lr1110( targetKeyID ), input );
lr1110_crypto_store_to_flash( &LR1110, ( lr1110_crypto_status_t* ) &status );
return status;
}
SecureElementStatus_t SecureElementProcessJoinAccept( JoinReqIdentifier_t joinReqType, uint8_t* joinEui,
uint16_t devNonce, uint8_t* encJoinAccept,
uint8_t encJoinAcceptSize, uint8_t* decJoinAccept,
uint8_t* versionMinor )
{
SecureElementStatus_t status = SECURE_ELEMENT_ERROR;
if( ( encJoinAccept == NULL ) || ( decJoinAccept == NULL ) || ( versionMinor == NULL ) )
{
return SECURE_ELEMENT_ERROR_NPE;
}
// Determine decryption key
KeyIdentifier_t encKeyID = NWK_KEY;
if( joinReqType != JOIN_REQ )
{
encKeyID = J_S_ENC_KEY;
}
// - Header buffer to be used for MIC computation
// - LoRaWAN 1.0.x : micHeader = [MHDR(1)]
// - LoRaWAN 1.1.x : micHeader = [JoinReqType(1), JoinEUI(8), DevNonce(2), MHDR(1)]
// Try first to process LoRaWAN 1.0.x JoinAccept
uint8_t micHeader10[1] = { 0x20 };
// cmac = aes128_cmac(NwkKey, MHDR | JoinNonce | NetID | DevAddr | DLSettings | RxDelay | CFList |
// CFListType)
lr1110_crypto_process_join_accept(
&LR1110, ( lr1110_crypto_status_t* ) &status, convert_key_id_from_se_to_lr1110( encKeyID ),
convert_key_id_from_se_to_lr1110( NWK_KEY ), ( lr1110_crypto_lorawan_version_t ) 0, micHeader10,
encJoinAccept + 1, encJoinAcceptSize - 1, decJoinAccept + 1 );
if( status == SECURE_ELEMENT_SUCCESS )
{
*versionMinor = ( ( decJoinAccept[11] & 0x80 ) == 0x80 ) ? 1 : 0;
if( *versionMinor == 0 )
{
// Network server is operating according to LoRaWAN 1.0.x
return SECURE_ELEMENT_SUCCESS;
}
}
#if( USE_LRWAN_1_1_X_CRYPTO == 1 )
// 1.0.x trial failed. Trying to process LoRaWAN 1.1.x JoinAccept
uint8_t micHeader11[JOIN_ACCEPT_MIC_COMPUTATION_OFFSET] = { 0 };
uint16_t bufItr = 0;
// cmac = aes128_cmac(JSIntKey, JoinReqType | JoinEUI | DevNonce | MHDR | JoinNonce | NetID | DevAddr |
// DLSettings | RxDelay | CFList | CFListType)
micHeader11[bufItr++] = ( uint8_t ) joinReqType;
memcpyr( micHeader11 + bufItr, joinEui, LORAMAC_JOIN_EUI_FIELD_SIZE );
bufItr += LORAMAC_JOIN_EUI_FIELD_SIZE;
micHeader11[bufItr++] = devNonce & 0xFF;
micHeader11[bufItr++] = ( devNonce >> 8 ) & 0xFF;
micHeader11[bufItr++] = 0x20;
lr1110_crypto_process_join_accept(
&LR1110, ( lr1110_crypto_status_t* ) &status, convert_key_id_from_se_to_lr1110( encKeyID ),
convert_key_id_from_se_to_lr1110( J_S_INT_KEY ), ( lr1110_crypto_lorawan_version_t ) 1, micHeader11,
encJoinAccept + 1, encJoinAcceptSize - 1, decJoinAccept + 1 );
if( status == SECURE_ELEMENT_SUCCESS )
{
*versionMinor = ( ( decJoinAccept[11] & 0x80 ) == 0x80 ) ? 1 : 0;
if( *versionMinor == 1 )
{
// Network server is operating according to LoRaWAN 1.1.x
return SECURE_ELEMENT_SUCCESS;
}
}
#endif
return status;
}
SecureElementStatus_t SecureElementRandomNumber( uint32_t* randomNum )
{
if( randomNum == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
*randomNum = LR1110SeHalGetRandomNumber( );
return SECURE_ELEMENT_SUCCESS;
}
SecureElementStatus_t SecureElementSetDevEui( uint8_t* devEui )
{
if( devEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeContext.DevEui, devEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetDevEui( void )
{
return SeContext.DevEui;
}
SecureElementStatus_t SecureElementSetJoinEui( uint8_t* joinEui )
{
if( joinEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeContext.JoinEui, joinEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetJoinEui( void )
{
return SeContext.JoinEui;
}
SecureElementStatus_t SecureElementSetPin( uint8_t* pin )
{
if( pin == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeContext.Pin, pin, SE_PIN_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
uint8_t* SecureElementGetPin( void )
{
return SeContext.Pin;
}
static lr1110_crypto_keys_idx_t convert_key_id_from_se_to_lr1110( KeyIdentifier_t key_id )
{
lr1110_crypto_keys_idx_t id = LR1110_CRYPTO_KEYS_IDX_GP0;
switch( key_id )
{
case APP_KEY:
id = LR1110_CRYPTO_KEYS_IDX_APP_KEY;
break;
case NWK_KEY:
id = LR1110_CRYPTO_KEYS_IDX_NWK_KEY;
break;
case J_S_INT_KEY:
id = LR1110_CRYPTO_KEYS_IDX_J_S_INT_KEY;
break;
case J_S_ENC_KEY:
id = LR1110_CRYPTO_KEYS_IDX_J_S_ENC_KEY;
break;
case F_NWK_S_INT_KEY:
id = LR1110_CRYPTO_KEYS_IDX_F_NWK_S_INT_KEY;
break;
case S_NWK_S_INT_KEY:
id = LR1110_CRYPTO_KEYS_IDX_S_NWK_S_INT_KEY;
break;
case NWK_S_ENC_KEY:
id = LR1110_CRYPTO_KEYS_IDX_NWK_S_ENC_KEY;
break;
case APP_S_KEY:
id = LR1110_CRYPTO_KEYS_IDX_APP_S_KEY;
break;
case MC_ROOT_KEY:
id = LR1110_CRYPTO_KEYS_IDX_GP_KE_KEY_5;
break;
case MC_KE_KEY:
id = LR1110_CRYPTO_KEYS_IDX_GP_KE_KEY_4;
break;
case MC_KEY_0:
id = LR1110_CRYPTO_KEYS_IDX_GP_KE_KEY_0;
break;
case MC_APP_S_KEY_0:
id = LR1110_CRYPTO_KEYS_IDX_MC_APP_S_KEY_0;
break;
case MC_NWK_S_KEY_0:
id = LR1110_CRYPTO_KEYS_IDX_MC_NWK_S_KEY_0;
break;
case MC_KEY_1:
id = LR1110_CRYPTO_KEYS_IDX_GP_KE_KEY_1;
break;
case MC_APP_S_KEY_1:
id = LR1110_CRYPTO_KEYS_IDX_MC_APP_S_KEY_1;
break;
case MC_NWK_S_KEY_1:
id = LR1110_CRYPTO_KEYS_IDX_MC_NWK_S_KEY_1;
break;
case MC_KEY_2:
id = LR1110_CRYPTO_KEYS_IDX_GP_KE_KEY_2;
break;
case MC_APP_S_KEY_2:
id = LR1110_CRYPTO_KEYS_IDX_MC_APP_S_KEY_2;
break;
case MC_NWK_S_KEY_2:
id = LR1110_CRYPTO_KEYS_IDX_MC_NWK_S_KEY_2;
break;
case MC_KEY_3:
id = LR1110_CRYPTO_KEYS_IDX_GP_KE_KEY_3;
break;
case MC_APP_S_KEY_3:
id = LR1110_CRYPTO_KEYS_IDX_MC_APP_S_KEY_3;
break;
case MC_NWK_S_KEY_3:
id = LR1110_CRYPTO_KEYS_IDX_MC_NWK_S_KEY_3;
break;
case SLOT_RAND_ZERO_KEY:
id = LR1110_CRYPTO_KEYS_IDX_GP0;
break;
default:
id = LR1110_CRYPTO_KEYS_IDX_GP1;
break;
}
return id;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_3927_2 |
crossvul-cpp_data_good_4424_0 | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2020, Joyent, Inc.
*/
#include <syslog.h>
#include <dlfcn.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <strings.h>
#include <malloc.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <security/pam_appl.h>
#include <security/pam_modules.h>
#include <sys/mman.h>
#include <libintl.h>
#include "pam_impl.h"
static char *pam_snames [PAM_NUM_MODULE_TYPES] = {
PAM_ACCOUNT_NAME,
PAM_AUTH_NAME,
PAM_PASSWORD_NAME,
PAM_SESSION_NAME
};
static char *pam_inames [PAM_MAX_ITEMS] = {
/* NONE */ NULL,
/* PAM_SERVICE */ "service",
/* PAM_USER */ "user",
/* PAM_TTY */ "tty",
/* PAM_RHOST */ "rhost",
/* PAM_CONV */ "conv",
/* PAM_AUTHTOK */ "authtok",
/* PAM_OLDAUTHTOK */ "oldauthtok",
/* PAM_RUSER */ "ruser",
/* PAM_USER_PROMPT */ "user_prompt",
/* PAM_REPOSITORY */ "repository",
/* PAM_RESOURCE */ "resource",
/* PAM_AUSER */ "auser",
/* Undefined Items */
};
/*
* This extra definition is needed in order to build this library
* on pre-64-bit-aware systems.
*/
#if !defined(_LFS64_LARGEFILE)
#define stat64 stat
#endif /* !defined(_LFS64_LARGEFILE) */
/* functions to dynamically load modules */
static int load_modules(pam_handle_t *, int, char *, pamtab_t *);
static void *open_module(pam_handle_t *, char *);
static int load_function(void *, char *, int (**func)());
/* functions to read and store the pam.conf configuration file */
static int open_pam_conf(struct pam_fh **, pam_handle_t *, char *);
static void close_pam_conf(struct pam_fh *);
static int read_pam_conf(pam_handle_t *, char *);
static int get_pam_conf_entry(struct pam_fh *, pam_handle_t *,
pamtab_t **);
static char *read_next_token(char **);
static char *nextline(struct pam_fh *, pam_handle_t *, int *);
static int verify_pam_conf(pamtab_t *, char *);
/* functions to clean up and free memory */
static void clean_up(pam_handle_t *);
static void free_pamconf(pamtab_t *);
static void free_pam_conf_info(pam_handle_t *);
static void free_env(env_list *);
/* convenience functions for I18N/L10N communication */
static void free_resp(int, struct pam_response *);
static int do_conv(pam_handle_t *, int, int,
char messages[PAM_MAX_NUM_MSG][PAM_MAX_MSG_SIZE], void *,
struct pam_response **);
static int log_priority; /* pam_trace syslog priority & facility */
static int pam_debug = 0;
static char *
pam_trace_iname(int item_type, char *iname_buf)
{
char *name;
if (item_type <= 0 ||
item_type >= PAM_MAX_ITEMS ||
(name = pam_inames[item_type]) == NULL) {
(void) sprintf(iname_buf, "%d", item_type);
return (iname_buf);
}
return (name);
}
static char *
pam_trace_fname(int flag)
{
if (flag & PAM_BINDING)
return (PAM_BINDING_NAME);
if (flag & PAM_INCLUDE)
return (PAM_INCLUDE_NAME);
if (flag & PAM_OPTIONAL)
return (PAM_OPTIONAL_NAME);
if (flag & PAM_REQUIRED)
return (PAM_REQUIRED_NAME);
if (flag & PAM_REQUISITE)
return (PAM_REQUISITE_NAME);
if (flag & PAM_SUFFICIENT)
return (PAM_SUFFICIENT_NAME);
return ("bad flag name");
}
static char *
pam_trace_cname(pam_handle_t *pamh)
{
if (pamh->pam_conf_name[pamh->include_depth] == NULL)
return ("NULL");
return (pamh->pam_conf_name[pamh->include_depth]);
}
#include <deflt.h>
#include <stdarg.h>
/*
* pam_settrace - setup configuration for pam tracing
*
* turn on PAM debug if "magic" file exists
* if exists (original), pam_debug = PAM_DEBUG_DEFAULT,
* log_priority = LOG_DEBUG(7) and log_facility = LOG_AUTH(4).
*
* if has contents, keywork=value pairs:
*
* "log_priority=" 0-7, the pam_trace syslog priority to use
* (see sys/syslog.h)
* "log_facility=" 0-23, the pam_trace syslog facility to use
* (see sys/syslog.h)
* "debug_flags=" PAM_DEBUG_DEFAULT (0x0001), log traditional
* (original) debugging.
* Plus the logical or of:
* PAM_DEBUG_ITEM (0x0002), log item values and
* pam_get_item.
* PAM_DEBUG_MODULE (0x0004), log module return status.
* PAM_DEBUG_CONF (0x0008), log pam.conf parsing.
* PAM_DEBUG_DATA (0x0010), get/set_data.
* PAM_DEBUG_CONV (0x0020), conversation/response.
*
* If compiled with DEBUG:
* PAM_DEBUG_AUTHTOK (0x8000), display AUTHTOK value if
* PAM_DEBUG_ITEM is set and results from
* PAM_PROMPT_ECHO_OFF responses.
* USE CAREFULLY, THIS EXPOSES THE USER'S PASSWORDS.
*
* or set to 0 and off even if PAM_DEBUG file exists.
*
* Output has the general form:
* <whatever was set syslog> PAM[<pid>]: <interface>(<handle> and other info)
* <whatever was set syslog> PAM[<pid>]: details requested for <interface> call
* Where: <pid> is the process ID of the calling process.
* <handle> is the Hex value of the pam_handle associated with the
* call.
*/
static void
pam_settrace()
{
void *defp;
if ((defp = defopen_r(PAM_DEBUG)) != NULL) {
char *arg;
int code;
int facility = LOG_AUTH;
pam_debug = PAM_DEBUG_DEFAULT;
log_priority = LOG_DEBUG;
(void) defcntl_r(DC_SETFLAGS, DC_CASE, defp);
if ((arg = defread_r(LOG_PRIORITY, defp)) != NULL) {
code = (int)strtol(arg, NULL, 10);
if ((code & ~LOG_PRIMASK) == 0) {
log_priority = code;
}
}
if ((arg = defread_r(LOG_FACILITY, defp)) != NULL) {
code = (int)strtol(arg, NULL, 10);
if (code < LOG_NFACILITIES) {
facility = code << 3;
}
}
if ((arg = defread_r(DEBUG_FLAGS, defp)) != NULL) {
pam_debug = (int)strtol(arg, NULL, 0);
}
defclose_r(defp);
log_priority |= facility;
}
}
/*
* pam_trace - logs tracing messages
*
* flag = debug_flags from /etc/pam_debug
* format and args = message to print (PAM[<pid>]: is prepended).
*
* global log_priority = pam_trace syslog (log_priority | log_facility)
* from /etc/pam_debug
*/
/*PRINTFLIKE2*/
static void
pam_trace(int flag, char *format, ...)
{
va_list args;
char message[1024];
int savemask;
if ((pam_debug & flag) == 0)
return;
savemask = setlogmask(LOG_MASK(log_priority & LOG_PRIMASK));
(void) snprintf(message, sizeof (message), "PAM[%ld]: %s",
(long)getpid(), format);
va_start(args, format);
(void) vsyslog(log_priority, message, args);
va_end(args);
(void) setlogmask(savemask);
}
/*
* __pam_log - logs PAM syslog messages
*
* priority = message priority
* format and args = message to log
*/
/*PRINTFLIKE2*/
void
__pam_log(int priority, const char *format, ...)
{
va_list args;
int savemask = setlogmask(LOG_MASK(priority & LOG_PRIMASK));
va_start(args, format);
(void) vsyslog(priority, format, args);
va_end(args);
(void) setlogmask(savemask);
}
/*
* pam_XXXXX routines
*
* These are the entry points to the authentication switch
*/
/*
* pam_start - initiate an authentication transaction and
* set parameter values to be used during the
* transaction
*/
int
pam_start(const char *service, const char *user,
const struct pam_conv *pam_conv, pam_handle_t **pamh)
{
int err;
*pamh = calloc(1, sizeof (struct pam_handle));
pam_settrace();
pam_trace(PAM_DEBUG_DEFAULT,
"pam_start(%s,%s,%p:%p) - debug = %x",
service ? service : "NULL", user ? user : "NULL", (void *)pam_conv,
(void *)*pamh, pam_debug);
if (*pamh == NULL)
return (PAM_BUF_ERR);
(*pamh)->pam_inmodule = RO_OK; /* OK to set RO items */
if ((err = pam_set_item(*pamh, PAM_SERVICE, (void *)service))
!= PAM_SUCCESS) {
clean_up(*pamh);
*pamh = NULL;
return (err);
}
if ((err = pam_set_item(*pamh, PAM_USER, (void *)user))
!= PAM_SUCCESS) {
clean_up(*pamh);
*pamh = NULL;
return (err);
}
if ((err = pam_set_item(*pamh, PAM_CONV, (void *)pam_conv))
!= PAM_SUCCESS) {
clean_up(*pamh);
*pamh = NULL;
return (err);
}
(*pamh)->pam_inmodule = RW_OK;
return (PAM_SUCCESS);
}
/*
* pam_end - terminate an authentication transaction
*/
int
pam_end(pam_handle_t *pamh, int pam_status)
{
struct pam_module_data *psd, *p;
fd_list *expired;
fd_list *traverse;
env_list *env_expired;
env_list *env_traverse;
pam_trace(PAM_DEBUG_DEFAULT,
"pam_end(%p): status = %s", (void *)pamh,
pam_strerror(pamh, pam_status));
if (pamh == NULL)
return (PAM_SYSTEM_ERR);
/* call the cleanup routines for module specific data */
psd = pamh->ssd;
while (psd) {
if (psd->cleanup) {
psd->cleanup(pamh, psd->data, pam_status);
}
p = psd;
psd = p->next;
free(p->module_data_name);
free(p);
}
pamh->ssd = NULL;
/* dlclose all module fds */
traverse = pamh->fd;
while (traverse) {
expired = traverse;
traverse = traverse->next;
(void) dlclose(expired->mh);
free(expired);
}
pamh->fd = 0;
/* remove all environment variables */
env_traverse = pamh->pam_env;
while (env_traverse) {
env_expired = env_traverse;
env_traverse = env_traverse->next;
free_env(env_expired);
}
clean_up(pamh);
return (PAM_SUCCESS);
}
/*
* pam_set_item - set the value of a parameter that can be
* retrieved via a call to pam_get_item()
*/
int
pam_set_item(pam_handle_t *pamh, int item_type, const void *item)
{
struct pam_item *pip;
int size;
char iname_buf[PAM_MAX_MSG_SIZE];
if (((pam_debug & PAM_DEBUG_ITEM) == 0) || (pamh == NULL)) {
pam_trace(PAM_DEBUG_DEFAULT,
"pam_set_item(%p:%s)", (void *)pamh,
pam_trace_iname(item_type, iname_buf));
}
if (pamh == NULL)
return (PAM_SYSTEM_ERR);
/* check read only items */
if ((item_type == PAM_SERVICE) && (pamh->pam_inmodule != RO_OK))
return (PAM_PERM_DENIED);
/*
* Check that item_type is within valid range
*/
if (item_type <= 0 || item_type >= PAM_MAX_ITEMS)
return (PAM_SYMBOL_ERR);
pip = &(pamh->ps_item[item_type]);
switch (item_type) {
case PAM_AUTHTOK:
case PAM_OLDAUTHTOK:
if (pip->pi_addr != NULL)
(void) memset(pip->pi_addr, 0, pip->pi_size);
/*FALLTHROUGH*/
case PAM_SERVICE:
case PAM_USER:
case PAM_TTY:
case PAM_RHOST:
case PAM_RUSER:
case PAM_USER_PROMPT:
case PAM_RESOURCE:
case PAM_AUSER:
if (pip->pi_addr != NULL) {
free(pip->pi_addr);
}
if (item == NULL) {
pip->pi_addr = NULL;
pip->pi_size = 0;
} else {
pip->pi_addr = strdup((char *)item);
if (pip->pi_addr == NULL) {
pip->pi_size = 0;
return (PAM_BUF_ERR);
}
pip->pi_size = strlen(pip->pi_addr);
}
break;
case PAM_CONV:
if (pip->pi_addr != NULL)
free(pip->pi_addr);
size = sizeof (struct pam_conv);
if ((pip->pi_addr = calloc(1, size)) == NULL)
return (PAM_BUF_ERR);
if (item != NULL)
(void) memcpy(pip->pi_addr, item, (unsigned int) size);
else
(void) memset(pip->pi_addr, 0, size);
pip->pi_size = size;
break;
case PAM_REPOSITORY:
if (pip->pi_addr != NULL) {
pam_repository_t *auth_rep;
auth_rep = (pam_repository_t *)pip->pi_addr;
if (auth_rep->type != NULL)
free(auth_rep->type);
if (auth_rep->scope != NULL)
free(auth_rep->scope);
free(auth_rep);
}
if (item != NULL) {
pam_repository_t *s, *d;
size = sizeof (struct pam_repository);
pip->pi_addr = calloc(1, size);
if (pip->pi_addr == NULL)
return (PAM_BUF_ERR);
s = (struct pam_repository *)item;
d = (struct pam_repository *)pip->pi_addr;
d->type = strdup(s->type);
if (d->type == NULL)
return (PAM_BUF_ERR);
d->scope = malloc(s->scope_len);
if (d->scope == NULL)
return (PAM_BUF_ERR);
(void) memcpy(d->scope, s->scope, s->scope_len);
d->scope_len = s->scope_len;
}
pip->pi_size = size;
break;
default:
return (PAM_SYMBOL_ERR);
}
switch (item_type) {
case PAM_CONV:
pam_trace(PAM_DEBUG_ITEM, "pam_set_item(%p:%s)=%p",
(void *)pamh,
pam_trace_iname(item_type, iname_buf),
item ? (void *)((struct pam_conv *)item)->conv :
(void *)0);
break;
case PAM_REPOSITORY:
pam_trace(PAM_DEBUG_ITEM, "pam_set_item(%p:%s)=%s",
(void *)pamh,
pam_trace_iname(item_type, iname_buf),
item ? (((struct pam_repository *)item)->type ?
((struct pam_repository *)item)->type : "NULL") :
"NULL");
break;
case PAM_AUTHTOK:
case PAM_OLDAUTHTOK:
#ifdef DEBUG
if (pam_debug & PAM_DEBUG_AUTHTOK)
pam_trace(PAM_DEBUG_ITEM,
"pam_set_item(%p:%s)=%s", (void *)pamh,
pam_trace_iname(item_type, iname_buf),
item ? (char *)item : "NULL");
else
#endif /* DEBUG */
pam_trace(PAM_DEBUG_ITEM,
"pam_set_item(%p:%s)=%s", (void *)pamh,
pam_trace_iname(item_type, iname_buf),
item ? "********" : "NULL");
break;
default:
pam_trace(PAM_DEBUG_ITEM, "pam_set_item(%p:%s)=%s",
(void *)pamh,
pam_trace_iname(item_type, iname_buf),
item ? (char *)item : "NULL");
}
return (PAM_SUCCESS);
}
/*
* pam_get_item - read the value of a parameter specified in
* the call to pam_set_item()
*/
int
pam_get_item(const pam_handle_t *pamh, int item_type, void **item)
{
struct pam_item *pip;
char iname_buf[PAM_MAX_MSG_SIZE];
if (((pam_debug & PAM_DEBUG_ITEM) == 0) || (pamh == NULL)) {
pam_trace(PAM_DEBUG_ITEM, "pam_get_item(%p:%s)",
(void *)pamh, pam_trace_iname(item_type, iname_buf));
}
if (pamh == NULL)
return (PAM_SYSTEM_ERR);
if (item_type <= 0 || item_type >= PAM_MAX_ITEMS)
return (PAM_SYMBOL_ERR);
if ((pamh->pam_inmodule != WO_OK) &&
((item_type == PAM_AUTHTOK || item_type == PAM_OLDAUTHTOK))) {
__pam_log(LOG_AUTH | LOG_NOTICE, "pam_get_item(%s) called from "
"a non module context",
pam_trace_iname(item_type, iname_buf));
return (PAM_PERM_DENIED);
}
pip = (struct pam_item *)&(pamh->ps_item[item_type]);
*item = pip->pi_addr;
switch (item_type) {
case PAM_CONV:
pam_trace(PAM_DEBUG_ITEM, "pam_get_item(%p:%s)=%p",
(void *)pamh,
pam_trace_iname(item_type, iname_buf),
(void *)((struct pam_conv *)*item)->conv);
break;
case PAM_REPOSITORY:
pam_trace(PAM_DEBUG_ITEM, "pam_get_item(%p:%s)=%s",
(void *)pamh,
pam_trace_iname(item_type, iname_buf),
*item ? (((struct pam_repository *)*item)->type ?
((struct pam_repository *)*item)->type : "NULL") :
"NULL");
break;
case PAM_AUTHTOK:
case PAM_OLDAUTHTOK:
#ifdef DEBUG
if (pam_debug & PAM_DEBUG_AUTHTOK)
pam_trace(PAM_DEBUG_ITEM,
"pam_get_item(%p:%s)=%s", (void *)pamh,
pam_trace_iname(item_type, iname_buf),
*item ? *(char **)item : "NULL");
else
#endif /* DEBUG */
pam_trace(PAM_DEBUG_ITEM,
"pam_get_item(%p:%s)=%s", (void *)pamh,
pam_trace_iname(item_type, iname_buf),
*item ? "********" : "NULL");
break;
default:
pam_trace(PAM_DEBUG_ITEM, "pam_get_item(%p:%s)=%s",
(void *)pamh,
pam_trace_iname(item_type, iname_buf),
*item ? *(char **)item : "NULL");
}
return (PAM_SUCCESS);
}
/*
* parse_user_name - process the user response: ignore
* '\t' or ' ' before or after a user name.
* user_input is a null terminated string.
* *ret_username will be the user name.
*/
static int
parse_user_name(char *user_input, char **ret_username)
{
register char *ptr;
register int index = 0;
char username[PAM_MAX_RESP_SIZE];
/* Set the default value for *ret_username */
*ret_username = NULL;
/*
* Set the initial value for username - this is a buffer holds
* the user name.
*/
bzero((void *)username, PAM_MAX_RESP_SIZE);
/*
* The user_input is guaranteed to be terminated by a null character.
*/
ptr = user_input;
/* Skip all the leading whitespaces if there are any. */
while ((*ptr == ' ') || (*ptr == '\t'))
ptr++;
if (*ptr == '\0') {
/*
* We should never get here since the user_input we got
* in pam_get_user() is not all whitespaces nor just "\0".
*/
return (PAM_BUF_ERR);
}
/*
* username will be the first string we get from user_input
* - we skip leading whitespaces and ignore trailing whitespaces
*/
while (*ptr != '\0') {
if ((*ptr == ' ') || (*ptr == '\t') ||
(index >= PAM_MAX_RESP_SIZE)) {
break;
} else {
username[index] = *ptr;
index++;
ptr++;
}
}
/* ret_username will be freed in pam_get_user(). */
if (index >= PAM_MAX_RESP_SIZE ||
(*ret_username = strdup(username)) == NULL)
return (PAM_BUF_ERR);
return (PAM_SUCCESS);
}
/*
* Get the value of PAM_USER. If not set, then use the convenience function
* to prompt for the user. Use prompt if specified, else use PAM_USER_PROMPT
* if it is set, else use default.
*/
#define WHITESPACE 0
#define USERNAME 1
int
pam_get_user(pam_handle_t *pamh, char **user, const char *prompt_override)
{
int status;
char *prompt = NULL;
char *real_username;
struct pam_response *ret_resp = NULL;
char messages[PAM_MAX_NUM_MSG][PAM_MAX_MSG_SIZE];
pam_trace(PAM_DEBUG_DEFAULT,
"pam_get_user(%p, %p, %s)", (void *)pamh, (void *)*user,
prompt_override ? prompt_override : "NULL");
if (pamh == NULL)
return (PAM_SYSTEM_ERR);
if ((status = pam_get_item(pamh, PAM_USER, (void **)user))
!= PAM_SUCCESS) {
return (status);
}
/* if the user is set, return it */
if (*user != NULL && *user[0] != '\0') {
return (PAM_SUCCESS);
}
/*
* if the module is requesting a special prompt, use it.
* else use PAM_USER_PROMPT.
*/
if (prompt_override != NULL) {
prompt = (char *)prompt_override;
} else {
status = pam_get_item(pamh, PAM_USER_PROMPT, (void**)&prompt);
if (status != PAM_SUCCESS) {
return (status);
}
}
/* if the prompt is not set, use default */
if (prompt == NULL || prompt[0] == '\0') {
prompt = dgettext(TEXT_DOMAIN, "Please enter user name: ");
}
/* prompt for the user */
(void) strncpy(messages[0], prompt, sizeof (messages[0]));
for (;;) {
int state = WHITESPACE;
status = do_conv(pamh, PAM_PROMPT_ECHO_ON, 1, messages,
NULL, &ret_resp);
if (status != PAM_SUCCESS) {
return (status);
}
if (ret_resp->resp && ret_resp->resp[0] != '\0') {
int len = strlen(ret_resp->resp);
int i;
for (i = 0; i < len; i++) {
if ((ret_resp->resp[i] != ' ') &&
(ret_resp->resp[i] != '\t')) {
state = USERNAME;
break;
}
}
if (state == USERNAME)
break;
}
/* essentially empty response, try again */
free_resp(1, ret_resp);
ret_resp = NULL;
}
/* set PAM_USER */
/* Parse the user input to get the user name. */
status = parse_user_name(ret_resp->resp, &real_username);
if (status != PAM_SUCCESS) {
if (real_username != NULL)
free(real_username);
free_resp(1, ret_resp);
return (status);
}
status = pam_set_item(pamh, PAM_USER, real_username);
free(real_username);
free_resp(1, ret_resp);
if (status != PAM_SUCCESS) {
return (status);
}
/*
* finally, get PAM_USER. We have to call pam_get_item to get
* the value of user because pam_set_item mallocs the memory.
*/
status = pam_get_item(pamh, PAM_USER, (void**)user);
return (status);
}
/*
* Set module specific data
*/
int
pam_set_data(pam_handle_t *pamh, const char *module_data_name, void *data,
void (*cleanup)(pam_handle_t *pamh, void *data, int pam_end_status))
{
struct pam_module_data *psd;
pam_trace(PAM_DEBUG_DATA,
"pam_set_data(%p:%s:%d)=%p", (void *)pamh,
(module_data_name != NULL) ? module_data_name : "NULL",
(pamh != NULL) ? pamh->pam_inmodule : -1, data);
if (pamh == NULL || (pamh->pam_inmodule != WO_OK) ||
module_data_name == NULL) {
return (PAM_SYSTEM_ERR);
}
/* check if module data already exists */
for (psd = pamh->ssd; psd; psd = psd->next) {
if (strcmp(psd->module_data_name, module_data_name) == 0) {
/* clean up original data before setting the new data */
if (psd->cleanup) {
psd->cleanup(pamh, psd->data, PAM_SUCCESS);
}
psd->data = (void *)data;
psd->cleanup = cleanup;
return (PAM_SUCCESS);
}
}
psd = malloc(sizeof (struct pam_module_data));
if (psd == NULL)
return (PAM_BUF_ERR);
psd->module_data_name = strdup(module_data_name);
if (psd->module_data_name == NULL) {
free(psd);
return (PAM_BUF_ERR);
}
psd->data = (void *)data;
psd->cleanup = cleanup;
psd->next = pamh->ssd;
pamh->ssd = psd;
return (PAM_SUCCESS);
}
/*
* get module specific data
*/
int
pam_get_data(const pam_handle_t *pamh, const char *module_data_name,
const void **data)
{
struct pam_module_data *psd;
if (pamh == NULL || (pamh->pam_inmodule != WO_OK) ||
module_data_name == NULL) {
pam_trace(PAM_DEBUG_DATA,
"pam_get_data(%p:%s:%d)=%p", (void *)pamh,
module_data_name ? module_data_name : "NULL",
pamh->pam_inmodule, *data);
return (PAM_SYSTEM_ERR);
}
for (psd = pamh->ssd; psd; psd = psd->next) {
if (strcmp(psd->module_data_name, module_data_name) == 0) {
*data = psd->data;
pam_trace(PAM_DEBUG_DATA,
"pam_get_data(%p:%s)=%p", (void *)pamh,
module_data_name, *data);
return (PAM_SUCCESS);
}
}
pam_trace(PAM_DEBUG_DATA,
"pam_get_data(%p:%s)=%s", (void *)pamh, module_data_name,
"PAM_NO_MODULE_DATA");
return (PAM_NO_MODULE_DATA);
}
/*
* PAM equivalent to strerror()
*/
/* ARGSUSED */
const char *
pam_strerror(pam_handle_t *pamh, int errnum)
{
switch (errnum) {
case PAM_SUCCESS:
return (dgettext(TEXT_DOMAIN, "Success"));
case PAM_OPEN_ERR:
return (dgettext(TEXT_DOMAIN, "Dlopen failure"));
case PAM_SYMBOL_ERR:
return (dgettext(TEXT_DOMAIN, "Symbol not found"));
case PAM_SERVICE_ERR:
return (dgettext(TEXT_DOMAIN,
"Error in underlying service module"));
case PAM_SYSTEM_ERR:
return (dgettext(TEXT_DOMAIN, "System error"));
case PAM_BUF_ERR:
return (dgettext(TEXT_DOMAIN, "Memory buffer error"));
case PAM_CONV_ERR:
return (dgettext(TEXT_DOMAIN, "Conversation failure"));
case PAM_PERM_DENIED:
return (dgettext(TEXT_DOMAIN, "Permission denied"));
case PAM_MAXTRIES:
return (dgettext(TEXT_DOMAIN,
"Maximum number of attempts exceeded"));
case PAM_AUTH_ERR:
return (dgettext(TEXT_DOMAIN, "Authentication failed"));
case PAM_NEW_AUTHTOK_REQD:
return (dgettext(TEXT_DOMAIN, "Get new authentication token"));
case PAM_CRED_INSUFFICIENT:
return (dgettext(TEXT_DOMAIN, "Insufficient credentials"));
case PAM_AUTHINFO_UNAVAIL:
return (dgettext(TEXT_DOMAIN,
"Can not retrieve authentication info"));
case PAM_USER_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "No account present for user"));
case PAM_CRED_UNAVAIL:
return (dgettext(TEXT_DOMAIN,
"Can not retrieve user credentials"));
case PAM_CRED_EXPIRED:
return (dgettext(TEXT_DOMAIN,
"User credentials have expired"));
case PAM_CRED_ERR:
return (dgettext(TEXT_DOMAIN,
"Failure setting user credentials"));
case PAM_ACCT_EXPIRED:
return (dgettext(TEXT_DOMAIN, "User account has expired"));
case PAM_AUTHTOK_EXPIRED:
return (dgettext(TEXT_DOMAIN, "User password has expired"));
case PAM_SESSION_ERR:
return (dgettext(TEXT_DOMAIN,
"Can not make/remove entry for session"));
case PAM_AUTHTOK_ERR:
return (dgettext(TEXT_DOMAIN,
"Authentication token manipulation error"));
case PAM_AUTHTOK_RECOVERY_ERR:
return (dgettext(TEXT_DOMAIN,
"Authentication token can not be recovered"));
case PAM_AUTHTOK_LOCK_BUSY:
return (dgettext(TEXT_DOMAIN,
"Authentication token lock busy"));
case PAM_AUTHTOK_DISABLE_AGING:
return (dgettext(TEXT_DOMAIN,
"Authentication token aging disabled"));
case PAM_NO_MODULE_DATA:
return (dgettext(TEXT_DOMAIN,
"Module specific data not found"));
case PAM_IGNORE:
return (dgettext(TEXT_DOMAIN, "Ignore module"));
case PAM_ABORT:
return (dgettext(TEXT_DOMAIN, "General PAM failure "));
case PAM_TRY_AGAIN:
return (dgettext(TEXT_DOMAIN,
"Unable to complete operation. Try again"));
default:
return (dgettext(TEXT_DOMAIN, "Unknown error"));
}
}
static void *
sm_name(int ind)
{
switch (ind) {
case PAM_AUTHENTICATE:
return (PAM_SM_AUTHENTICATE);
case PAM_SETCRED:
return (PAM_SM_SETCRED);
case PAM_ACCT_MGMT:
return (PAM_SM_ACCT_MGMT);
case PAM_OPEN_SESSION:
return (PAM_SM_OPEN_SESSION);
case PAM_CLOSE_SESSION:
return (PAM_SM_CLOSE_SESSION);
case PAM_CHAUTHTOK:
return (PAM_SM_CHAUTHTOK);
}
return (NULL);
}
static int
(*func(pamtab_t *modulep, int ind))()
{
void *funcp;
if ((funcp = modulep->function_ptr) == NULL)
return (NULL);
switch (ind) {
case PAM_AUTHENTICATE:
return (((struct auth_module *)funcp)->pam_sm_authenticate);
case PAM_SETCRED:
return (((struct auth_module *)funcp)->pam_sm_setcred);
case PAM_ACCT_MGMT:
return (((struct account_module *)funcp)->pam_sm_acct_mgmt);
case PAM_OPEN_SESSION:
return (((struct session_module *)funcp)->pam_sm_open_session);
case PAM_CLOSE_SESSION:
return (((struct session_module *)funcp)->pam_sm_close_session);
case PAM_CHAUTHTOK:
return (((struct password_module *)funcp)->pam_sm_chauthtok);
}
return (NULL);
}
/*
* Run through the PAM service module stack for the given module type.
*/
static int
run_stack(pam_handle_t *pamh, int flags, int type, int def_err, int ind,
char *function_name)
{
int err = PAM_SYSTEM_ERR; /* preset */
int optional_error = 0;
int required_error = 0;
int success = 0;
pamtab_t *modulep;
int (*sm_func)();
if (pamh == NULL)
return (PAM_SYSTEM_ERR);
/* read initial entries from pam.conf */
if ((err = read_pam_conf(pamh, PAM_CONFIG)) != PAM_SUCCESS) {
return (err);
}
if ((modulep =
pamh->pam_conf_info[pamh->include_depth][type]) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR, "%s no initial module present",
pam_trace_cname(pamh));
goto exit_return;
}
pamh->pam_inmodule = WO_OK; /* OK to get AUTHTOK */
include:
pam_trace(PAM_DEBUG_MODULE,
"[%d:%s]:run_stack:%s(%p, %x): %s", pamh->include_depth,
pam_trace_cname(pamh), function_name, (void *)pamh, flags,
modulep ? modulep->module_path : "NULL");
while (modulep != NULL) {
if (modulep->pam_flag & PAM_INCLUDE) {
/* save the return location */
pamh->pam_conf_modulep[pamh->include_depth] =
modulep->next;
pam_trace(PAM_DEBUG_MODULE,
"setting for include[%d:%p]",
pamh->include_depth, (void *)modulep->next);
if (pamh->include_depth++ >= PAM_MAX_INCLUDE) {
__pam_log(LOG_AUTH | LOG_ERR,
"run_stack: includes too deep %d "
"found trying to include %s from %s, %d "
"allowed", pamh->include_depth,
modulep->module_path, pamh->pam_conf_name
[PAM_MAX_INCLUDE] == NULL ? "NULL" :
pamh->pam_conf_name[PAM_MAX_INCLUDE],
PAM_MAX_INCLUDE);
goto exit_return;
}
if ((err = read_pam_conf(pamh,
modulep->module_path)) != PAM_SUCCESS) {
__pam_log(LOG_AUTH | LOG_ERR,
"run_stack[%d:%s]: can't read included "
"conf %s", pamh->include_depth,
pam_trace_cname(pamh),
modulep->module_path);
goto exit_return;
}
if ((modulep = pamh->pam_conf_info
[pamh->include_depth][type]) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"run_stack[%d:%s]: no include module "
"present %s", pamh->include_depth,
pam_trace_cname(pamh), function_name);
goto exit_return;
}
if (modulep->pam_flag & PAM_INCLUDE) {
/* first line another include */
goto include;
}
pam_trace(PAM_DEBUG_DEFAULT, "include[%d:%s]"
"(%p, %s)=%s", pamh->include_depth,
pam_trace_cname(pamh), (void *)pamh,
function_name, modulep->module_path);
if ((err = load_modules(pamh, type, sm_name(ind),
pamh->pam_conf_info
[pamh->include_depth][type])) != PAM_SUCCESS) {
pam_trace(PAM_DEBUG_DEFAULT,
"[%d:%s]:%s(%p, %x): load_modules failed",
pamh->include_depth, pam_trace_cname(pamh),
function_name, (void *)pamh, flags);
goto exit_return;
}
if ((modulep = pamh->pam_conf_info
[pamh->include_depth][type]) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"%s no initial module present",
pam_trace_cname(pamh));
goto exit_return;
}
} else if ((err = load_modules(pamh, type, sm_name(ind),
modulep)) != PAM_SUCCESS) {
pam_trace(PAM_DEBUG_DEFAULT,
"[%d:%s]:%s(%p, %x): load_modules failed",
pamh->include_depth, pam_trace_cname(pamh),
function_name, (void *)pamh, flags);
goto exit_return;
} /* PAM_INCLUDE */
sm_func = func(modulep, ind);
if (sm_func) {
err = sm_func(pamh, flags, modulep->module_argc,
(const char **)modulep->module_argv);
pam_trace(PAM_DEBUG_MODULE,
"[%d:%s]:%s(%p, %x): %s returned %s",
pamh->include_depth, pam_trace_cname(pamh),
function_name, (void *)pamh, flags,
modulep->module_path, pam_strerror(pamh, err));
switch (err) {
case PAM_IGNORE:
/* do nothing */
break;
case PAM_SUCCESS:
if ((modulep->pam_flag & PAM_SUFFI_BIND) &&
!required_error) {
pamh->pam_inmodule = RW_OK;
pam_trace(PAM_DEBUG_MODULE,
"[%d:%s]:%s(%p, %x): %s: success",
pamh->include_depth,
pam_trace_cname(pamh),
function_name, (void *)pamh, flags,
(modulep->pam_flag & PAM_BINDING) ?
PAM_BINDING_NAME :
PAM_SUFFICIENT_NAME);
goto exit_return;
}
success = 1;
break;
case PAM_TRY_AGAIN:
/*
* We need to return immediately, and
* we shouldn't reset the AUTHTOK item
* since it is not an error per-se.
*/
pamh->pam_inmodule = RW_OK;
pam_trace(PAM_DEBUG_MODULE,
"[%d:%s]:%s(%p, %x): TRY_AGAIN: %s",
pamh->include_depth, pam_trace_cname(pamh),
function_name, (void *)pamh, flags,
pam_strerror(pamh, required_error ?
required_error : err));
err = required_error ? required_error : err;
goto exit_return;
default:
if (modulep->pam_flag & PAM_REQUISITE) {
pamh->pam_inmodule = RW_OK;
pam_trace(PAM_DEBUG_MODULE,
"[%d:%s]:%s(%p, %x): requisite: %s",
pamh->include_depth,
pam_trace_cname(pamh),
function_name, (void *)pamh, flags,
pam_strerror(pamh,
required_error ? required_error :
err));
err = required_error ?
required_error : err;
goto exit_return;
} else if (modulep->pam_flag & PAM_REQRD_BIND) {
if (!required_error)
required_error = err;
} else {
if (!optional_error)
optional_error = err;
}
pam_trace(PAM_DEBUG_DEFAULT,
"[%d:%s]:%s(%p, %x): error %s",
pamh->include_depth, pam_trace_cname(pamh),
function_name, (void *)pamh, flags,
pam_strerror(pamh, err));
break;
}
}
modulep = modulep->next;
}
pam_trace(PAM_DEBUG_MODULE, "[%d:%s]:stack_end:%s(%p, %x): %s %s: %s",
pamh->include_depth, pam_trace_cname(pamh), function_name,
(void *)pamh, flags, pamh->include_depth ? "included" : "final",
required_error ? "required" : success ? "success" :
optional_error ? "optional" : "default",
pam_strerror(pamh, required_error ? required_error :
success ? PAM_SUCCESS : optional_error ? optional_error : def_err));
if (pamh->include_depth > 0) {
free_pam_conf_info(pamh);
pamh->include_depth--;
/* continue at next entry */
modulep = pamh->pam_conf_modulep[pamh->include_depth];
pam_trace(PAM_DEBUG_MODULE, "looping for include[%d:%p]",
pamh->include_depth, (void *)modulep);
goto include;
}
free_pam_conf_info(pamh);
pamh->pam_inmodule = RW_OK;
if (required_error != 0)
return (required_error);
else if (success != 0)
return (PAM_SUCCESS);
else if (optional_error != 0)
return (optional_error);
else
return (def_err);
exit_return:
/*
* All done at whatever depth we're at.
* Go back to not having read /etc/pam.conf
*/
while (pamh->include_depth > 0) {
free_pam_conf_info(pamh);
pamh->include_depth--;
}
free_pam_conf_info(pamh);
pamh->pam_inmodule = RW_OK;
return (err);
}
/*
* pam_authenticate - authenticate a user
*/
int
pam_authenticate(pam_handle_t *pamh, int flags)
{
int retval;
retval = run_stack(pamh, flags, PAM_AUTH_MODULE, PAM_AUTH_ERR,
PAM_AUTHENTICATE, "pam_authenticate");
if (retval != PAM_SUCCESS)
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
return (retval);
}
/*
* pam_setcred - modify or retrieve user credentials
*/
int
pam_setcred(pam_handle_t *pamh, int flags)
{
int retval;
retval = run_stack(pamh, flags, PAM_AUTH_MODULE, PAM_CRED_ERR,
PAM_SETCRED, "pam_setcred");
if (retval != PAM_SUCCESS)
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
return (retval);
}
/*
* pam_acct_mgmt - check password aging, account expiration
*/
int
pam_acct_mgmt(pam_handle_t *pamh, int flags)
{
int retval;
retval = run_stack(pamh, flags, PAM_ACCOUNT_MODULE, PAM_ACCT_EXPIRED,
PAM_ACCT_MGMT, "pam_acct_mgmt");
if (retval != PAM_SUCCESS &&
retval != PAM_NEW_AUTHTOK_REQD) {
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
}
return (retval);
}
/*
* pam_open_session - begin session management
*/
int
pam_open_session(pam_handle_t *pamh, int flags)
{
int retval;
retval = run_stack(pamh, flags, PAM_SESSION_MODULE, PAM_SESSION_ERR,
PAM_OPEN_SESSION, "pam_open_session");
if (retval != PAM_SUCCESS)
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
return (retval);
}
/*
* pam_close_session - terminate session management
*/
int
pam_close_session(pam_handle_t *pamh, int flags)
{
int retval;
retval = run_stack(pamh, flags, PAM_SESSION_MODULE, PAM_SESSION_ERR,
PAM_CLOSE_SESSION, "pam_close_session");
if (retval != PAM_SUCCESS)
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
return (retval);
}
/*
* pam_chauthtok - change user authentication token
*/
int
pam_chauthtok(pam_handle_t *pamh, int flags)
{
int retval;
/* do not let apps use PAM_PRELIM_CHECK or PAM_UPDATE_AUTHTOK */
if (flags & (PAM_PRELIM_CHECK | PAM_UPDATE_AUTHTOK)) {
pam_trace(PAM_DEBUG_DEFAULT,
"pam_chauthtok(%p, %x): %s", (void *)pamh, flags,
pam_strerror(pamh, PAM_SYMBOL_ERR));
return (PAM_SYMBOL_ERR);
}
/* 1st pass: PRELIM CHECK */
retval = run_stack(pamh, flags | PAM_PRELIM_CHECK, PAM_PASSWORD_MODULE,
PAM_AUTHTOK_ERR, PAM_CHAUTHTOK, "pam_chauthtok-prelim");
if (retval == PAM_TRY_AGAIN)
return (retval);
if (retval != PAM_SUCCESS) {
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
return (retval);
}
/* 2nd pass: UPDATE AUTHTOK */
retval = run_stack(pamh, flags | PAM_UPDATE_AUTHTOK,
PAM_PASSWORD_MODULE, PAM_AUTHTOK_ERR, PAM_CHAUTHTOK,
"pam_chauthtok-update");
if (retval != PAM_SUCCESS)
(void) pam_set_item(pamh, PAM_AUTHTOK, NULL);
return (retval);
}
/*
* pam_putenv - add an environment variable to the PAM handle
* if name_value == 'NAME=VALUE' then set variable to the value
* if name_value == 'NAME=' then set variable to an empty value
* if name_value == 'NAME' then delete the variable
*/
int
pam_putenv(pam_handle_t *pamh, const char *name_value)
{
int error = PAM_SYSTEM_ERR;
char *equal_sign = 0;
char *name = NULL, *value = NULL, *tmp_value = NULL;
env_list *traverse, *trail;
pam_trace(PAM_DEBUG_DEFAULT,
"pam_putenv(%p, %s)", (void *)pamh,
name_value ? name_value : "NULL");
if (pamh == NULL || name_value == NULL)
goto out;
/* see if we were passed 'NAME=VALUE', 'NAME=', or 'NAME' */
if ((equal_sign = strchr(name_value, '=')) != 0) {
if ((name = calloc(equal_sign - name_value + 1,
sizeof (char))) == 0) {
error = PAM_BUF_ERR;
goto out;
}
(void) strncpy(name, name_value, equal_sign - name_value);
if ((value = strdup(++equal_sign)) == 0) {
error = PAM_BUF_ERR;
goto out;
}
} else {
if ((name = strdup(name_value)) == 0) {
error = PAM_BUF_ERR;
goto out;
}
}
/* check to see if we already have this variable in the PAM handle */
traverse = pamh->pam_env;
trail = traverse;
while (traverse && strncmp(traverse->name, name, strlen(name))) {
trail = traverse;
traverse = traverse->next;
}
if (traverse) {
/* found a match */
if (value == 0) {
/* remove the env variable */
if (pamh->pam_env == traverse)
pamh->pam_env = traverse->next;
else
trail->next = traverse->next;
free_env(traverse);
} else if (strlen(value) == 0) {
/* set env variable to empty value */
if ((tmp_value = strdup("")) == 0) {
error = PAM_SYSTEM_ERR;
goto out;
}
free(traverse->value);
traverse->value = tmp_value;
} else {
/* set the new value */
if ((tmp_value = strdup(value)) == 0) {
error = PAM_SYSTEM_ERR;
goto out;
}
free(traverse->value);
traverse->value = tmp_value;
}
} else if (traverse == 0 && value) {
/*
* could not find a match in the PAM handle.
* add the new value if there is one
*/
if ((traverse = calloc(1, sizeof (env_list))) == 0) {
error = PAM_BUF_ERR;
goto out;
}
if ((traverse->name = strdup(name)) == 0) {
free_env(traverse);
error = PAM_BUF_ERR;
goto out;
}
if ((traverse->value = strdup(value)) == 0) {
free_env(traverse);
error = PAM_BUF_ERR;
goto out;
}
if (trail == 0) {
/* new head of list */
pamh->pam_env = traverse;
} else {
/* adding to end of list */
trail->next = traverse;
}
}
error = PAM_SUCCESS;
out:
if (error != PAM_SUCCESS) {
if (traverse) {
if (traverse->name)
free(traverse->name);
if (traverse->value)
free(traverse->value);
free(traverse);
}
}
if (name)
free(name);
if (value)
free(value);
return (error);
}
/*
* pam_getenv - retrieve an environment variable from the PAM handle
*/
char *
pam_getenv(pam_handle_t *pamh, const char *name)
{
int error = PAM_SYSTEM_ERR;
env_list *traverse;
pam_trace(PAM_DEBUG_DEFAULT,
"pam_getenv(%p, %p)", (void *)pamh, (void *)name);
if (pamh == NULL || name == NULL)
goto out;
/* check to see if we already have this variable in the PAM handle */
traverse = pamh->pam_env;
while (traverse && strncmp(traverse->name, name, strlen(name))) {
traverse = traverse->next;
}
error = (traverse ? PAM_SUCCESS : PAM_SYSTEM_ERR);
pam_trace(PAM_DEBUG_DEFAULT,
"pam_getenv(%p, %s)=%s", (void *)pamh, name,
traverse ? traverse->value : "NULL");
out:
return (error ? NULL : strdup(traverse->value));
}
/*
* pam_getenvlist - retrieve all environment variables from the PAM handle
* in a NULL terminated array. On error, return NULL.
*/
char **
pam_getenvlist(pam_handle_t *pamh)
{
int error = PAM_SYSTEM_ERR;
char **list = 0;
int length = 0;
env_list *traverse;
char *tenv;
size_t tenv_size;
pam_trace(PAM_DEBUG_DEFAULT,
"pam_getenvlist(%p)", (void *)pamh);
if (pamh == NULL)
goto out;
/* find out how many environment variables we have */
traverse = pamh->pam_env;
while (traverse) {
length++;
traverse = traverse->next;
}
/* allocate the array we will return to the caller */
if ((list = calloc(length + 1, sizeof (char *))) == NULL) {
error = PAM_BUF_ERR;
goto out;
}
/* add the variables one by one */
length = 0;
traverse = pamh->pam_env;
while (traverse != NULL) {
tenv_size = strlen(traverse->name) +
strlen(traverse->value) + 2; /* name=val\0 */
if ((tenv = malloc(tenv_size)) == NULL) {
error = PAM_BUF_ERR;
goto out;
}
/*LINTED*/
(void) sprintf(tenv, "%s=%s", traverse->name, traverse->value);
list[length++] = tenv;
traverse = traverse->next;
}
list[length] = NULL;
error = PAM_SUCCESS;
out:
if (error != PAM_SUCCESS) {
/* free the partially constructed list */
if (list) {
length = 0;
while (list[length] != NULL) {
free(list[length]);
length++;
}
free(list);
}
}
return (error ? NULL : list);
}
/*
* Routines to load a requested module on demand
*/
/*
* load_modules - load the requested module.
* if the dlopen or dlsym fail, then
* the module is ignored.
*/
static int
load_modules(pam_handle_t *pamh, int type, char *function_name,
pamtab_t *pam_entry)
{
void *mh;
struct auth_module *authp;
struct account_module *accountp;
struct session_module *sessionp;
struct password_module *passwdp;
int loading_functions = 0; /* are we currently loading functions? */
pam_trace(PAM_DEBUG_MODULE, "load_modules[%d:%s](%p, %s)=%s:%s",
pamh->include_depth, pam_trace_cname(pamh), (void *)pamh,
function_name, pam_trace_fname(pam_entry->pam_flag),
pam_entry->module_path);
while (pam_entry != NULL) {
pam_trace(PAM_DEBUG_DEFAULT,
"while load_modules[%d:%s](%p, %s)=%s",
pamh->include_depth, pam_trace_cname(pamh), (void *)pamh,
function_name, pam_entry->module_path);
if (pam_entry->pam_flag & PAM_INCLUDE) {
pam_trace(PAM_DEBUG_DEFAULT,
"done load_modules[%d:%s](%p, %s)=%s",
pamh->include_depth, pam_trace_cname(pamh),
(void *)pamh, function_name,
pam_entry->module_path);
return (PAM_SUCCESS);
}
switch (type) {
case PAM_AUTH_MODULE:
/* if the function has already been loaded, return */
authp = pam_entry->function_ptr;
if (!loading_functions &&
(((strcmp(function_name, PAM_SM_AUTHENTICATE)
== 0) && authp && authp->pam_sm_authenticate) ||
((strcmp(function_name, PAM_SM_SETCRED) == 0) &&
authp && authp->pam_sm_setcred))) {
return (PAM_SUCCESS);
}
/* function has not been loaded yet */
loading_functions = 1;
if (authp == NULL) {
authp = calloc(1, sizeof (struct auth_module));
if (authp == NULL)
return (PAM_BUF_ERR);
}
/* if open_module fails, return error */
if ((mh = open_module(pamh,
pam_entry->module_path)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"load_modules[%d:%s]: can not open module "
"%s", pamh->include_depth,
pam_trace_cname(pamh),
pam_entry->module_path);
free(authp);
return (PAM_OPEN_ERR);
}
/* load the authentication function */
if (strcmp(function_name, PAM_SM_AUTHENTICATE) == 0) {
if (load_function(mh, PAM_SM_AUTHENTICATE,
&authp->pam_sm_authenticate)
!= PAM_SUCCESS) {
/* return error if dlsym fails */
free(authp);
return (PAM_SYMBOL_ERR);
}
/* load the setcred function */
} else if (strcmp(function_name, PAM_SM_SETCRED) == 0) {
if (load_function(mh, PAM_SM_SETCRED,
&authp->pam_sm_setcred) != PAM_SUCCESS) {
/* return error if dlsym fails */
free(authp);
return (PAM_SYMBOL_ERR);
}
}
pam_entry->function_ptr = authp;
break;
case PAM_ACCOUNT_MODULE:
accountp = pam_entry->function_ptr;
if (!loading_functions &&
(strcmp(function_name, PAM_SM_ACCT_MGMT) == 0) &&
accountp && accountp->pam_sm_acct_mgmt) {
return (PAM_SUCCESS);
}
/*
* If functions are added to the account module,
* verify that one of the other functions hasn't
* already loaded it. See PAM_AUTH_MODULE code.
*/
loading_functions = 1;
accountp = calloc(1, sizeof (struct account_module));
if (accountp == NULL)
return (PAM_BUF_ERR);
/* if open_module fails, return error */
if ((mh = open_module(pamh,
pam_entry->module_path)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"load_modules[%d:%s]: can not open module "
"%s", pamh->include_depth,
pam_trace_cname(pamh),
pam_entry->module_path);
free(accountp);
return (PAM_OPEN_ERR);
}
if (load_function(mh, PAM_SM_ACCT_MGMT,
&accountp->pam_sm_acct_mgmt) != PAM_SUCCESS) {
__pam_log(LOG_AUTH | LOG_ERR,
"load_modules[%d:%s]: pam_sm_acct_mgmt() "
"missing", pamh->include_depth,
pam_trace_cname(pamh));
free(accountp);
return (PAM_SYMBOL_ERR);
}
pam_entry->function_ptr = accountp;
break;
case PAM_SESSION_MODULE:
sessionp = pam_entry->function_ptr;
if (!loading_functions &&
(((strcmp(function_name,
PAM_SM_OPEN_SESSION) == 0) &&
sessionp && sessionp->pam_sm_open_session) ||
((strcmp(function_name,
PAM_SM_CLOSE_SESSION) == 0) &&
sessionp && sessionp->pam_sm_close_session))) {
return (PAM_SUCCESS);
}
loading_functions = 1;
if (sessionp == NULL) {
sessionp = calloc(1,
sizeof (struct session_module));
if (sessionp == NULL)
return (PAM_BUF_ERR);
}
/* if open_module fails, return error */
if ((mh = open_module(pamh,
pam_entry->module_path)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"load_modules[%d:%s]: can not open module "
"%s", pamh->include_depth,
pam_trace_cname(pamh),
pam_entry->module_path);
free(sessionp);
return (PAM_OPEN_ERR);
}
if ((strcmp(function_name, PAM_SM_OPEN_SESSION) == 0) &&
load_function(mh, PAM_SM_OPEN_SESSION,
&sessionp->pam_sm_open_session) != PAM_SUCCESS) {
free(sessionp);
return (PAM_SYMBOL_ERR);
} else if ((strcmp(function_name,
PAM_SM_CLOSE_SESSION) == 0) &&
load_function(mh, PAM_SM_CLOSE_SESSION,
&sessionp->pam_sm_close_session) != PAM_SUCCESS) {
free(sessionp);
return (PAM_SYMBOL_ERR);
}
pam_entry->function_ptr = sessionp;
break;
case PAM_PASSWORD_MODULE:
passwdp = pam_entry->function_ptr;
if (!loading_functions &&
(strcmp(function_name, PAM_SM_CHAUTHTOK) == 0) &&
passwdp && passwdp->pam_sm_chauthtok) {
return (PAM_SUCCESS);
}
/*
* If functions are added to the password module,
* verify that one of the other functions hasn't
* already loaded it. See PAM_AUTH_MODULE code.
*/
loading_functions = 1;
passwdp = calloc(1, sizeof (struct password_module));
if (passwdp == NULL)
return (PAM_BUF_ERR);
/* if open_module fails, continue */
if ((mh = open_module(pamh,
pam_entry->module_path)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"load_modules[%d:%s]: can not open module "
"%s", pamh->include_depth,
pam_trace_cname(pamh),
pam_entry->module_path);
free(passwdp);
return (PAM_OPEN_ERR);
}
if (load_function(mh, PAM_SM_CHAUTHTOK,
&passwdp->pam_sm_chauthtok) != PAM_SUCCESS) {
free(passwdp);
return (PAM_SYMBOL_ERR);
}
pam_entry->function_ptr = passwdp;
break;
default:
pam_trace(PAM_DEBUG_DEFAULT,
"load_modules[%d:%s](%p, %s): unsupported type %d",
pamh->include_depth, pam_trace_cname(pamh),
(void *)pamh, function_name, type);
break;
}
pam_entry = pam_entry->next;
} /* while */
pam_trace(PAM_DEBUG_MODULE, "load_modules[%d:%s](%p, %s)=done",
pamh->include_depth, pam_trace_cname(pamh), (void *)pamh,
function_name);
return (PAM_SUCCESS);
}
/*
* open_module - Open the module first checking for
* propers modes and ownerships on the file.
*/
static void *
open_module(pam_handle_t *pamh, char *module_so)
{
struct stat64 stb;
char *errmsg;
void *lfd;
fd_list *module_fds = 0;
fd_list *trail = 0;
fd_list *traverse = 0;
/* Check the ownership and file modes */
if (stat64(module_so, &stb) < 0) {
__pam_log(LOG_AUTH | LOG_ERR,
"open_module[%d:%s]: stat(%s) failed: %s",
pamh->include_depth, pam_trace_cname(pamh), module_so,
strerror(errno));
return (NULL);
}
if (stb.st_uid != (uid_t)0) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_module[%d:%s]: Owner of the module %s is not root",
pamh->include_depth, pam_trace_cname(pamh), module_so);
return (NULL);
}
if (stb.st_mode & S_IWGRP) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_module[%d:%s]: module %s writable by group",
pamh->include_depth, pam_trace_cname(pamh), module_so);
return (NULL);
}
if (stb.st_mode & S_IWOTH) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_module[%d:%s]: module %s writable by world",
pamh->include_depth, pam_trace_cname(pamh), module_so);
return (NULL);
}
/*
* Perform the dlopen()
*/
lfd = (void *)dlopen(module_so, RTLD_LAZY);
if (lfd == NULL) {
errmsg = dlerror();
__pam_log(LOG_AUTH | LOG_ERR, "open_module[%d:%s]: %s "
"failed: %s", pamh->include_depth, pam_trace_cname(pamh),
module_so, errmsg != NULL ? errmsg : "Unknown error");
return (NULL);
} else {
/* add this fd to the pam handle */
if ((module_fds = calloc(1, sizeof (fd_list))) == 0) {
(void) dlclose(lfd);
lfd = 0;
return (NULL);
}
module_fds->mh = lfd;
if (pamh->fd == 0) {
/* adding new head of list */
pamh->fd = module_fds;
} else {
/* appending to end of list */
traverse = pamh->fd;
while (traverse) {
trail = traverse;
traverse = traverse->next;
}
trail->next = module_fds;
}
}
return (lfd);
}
/*
* load_function - call dlsym() to resolve the function address
*/
static int
load_function(void *lfd, char *name, int (**func)())
{
char *errmsg = NULL;
if (lfd == NULL)
return (PAM_SYMBOL_ERR);
*func = (int (*)())dlsym(lfd, name);
if (*func == NULL) {
errmsg = dlerror();
__pam_log(LOG_AUTH | LOG_ERR, "dlsym failed %s: error %s",
name, errmsg != NULL ? errmsg : "Unknown error");
return (PAM_SYMBOL_ERR);
}
pam_trace(PAM_DEBUG_DEFAULT,
"load_function: successful load of %s", name);
return (PAM_SUCCESS);
}
/*
* Routines to read the pam.conf configuration file
*/
/*
* open_pam_conf - open the pam.conf config file
*/
static int
open_pam_conf(struct pam_fh **pam_fh, pam_handle_t *pamh, char *config)
{
struct stat64 stb;
int fd;
if ((fd = open(config, O_RDONLY)) == -1) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_pam_conf[%d:%s]: open(%s) failed: %s",
pamh->include_depth, pam_trace_cname(pamh), config,
strerror(errno));
return (0);
}
/* Check the ownership and file modes */
if (fstat64(fd, &stb) < 0) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_pam_conf[%d:%s]: stat(%s) failed: %s",
pamh->include_depth, pam_trace_cname(pamh), config,
strerror(errno));
(void) close(fd);
return (0);
}
if (stb.st_uid != (uid_t)0) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_pam_conf[%d:%s]: Owner of %s is not root",
pamh->include_depth, pam_trace_cname(pamh), config);
(void) close(fd);
return (0);
}
if (stb.st_mode & S_IWGRP) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_pam_conf[%d:%s]: %s writable by group",
pamh->include_depth, pam_trace_cname(pamh), config);
(void) close(fd);
return (0);
}
if (stb.st_mode & S_IWOTH) {
__pam_log(LOG_AUTH | LOG_ALERT,
"open_pam_conf[%d:%s]: %s writable by world",
pamh->include_depth, pam_trace_cname(pamh), config);
(void) close(fd);
return (0);
}
if ((*pam_fh = calloc(1, sizeof (struct pam_fh))) == NULL) {
(void) close(fd);
return (0);
}
(*pam_fh)->fconfig = fd;
(*pam_fh)->bufsize = (size_t)stb.st_size;
if (((*pam_fh)->data = mmap(0, (*pam_fh)->bufsize, PROT_READ,
MAP_PRIVATE, (*pam_fh)->fconfig, 0)) == MAP_FAILED) {
(void) close(fd);
free (*pam_fh);
return (0);
}
(*pam_fh)->bufferp = (*pam_fh)->data;
return (1);
}
/*
* close_pam_conf - close pam.conf
*/
static void
close_pam_conf(struct pam_fh *pam_fh)
{
(void) munmap(pam_fh->data, pam_fh->bufsize);
(void) close(pam_fh->fconfig);
free(pam_fh);
}
/*
* read_pam_conf - read in each entry in pam.conf and store info
* under the pam handle.
*/
static int
read_pam_conf(pam_handle_t *pamh, char *config)
{
struct pam_fh *pam_fh;
pamtab_t *pamentp;
pamtab_t *tpament;
char *service;
int error;
int i = pamh->include_depth; /* include depth */
/*
* service types:
* error (-1), "auth" (0), "account" (1), "session" (2), "password" (3)
*/
int service_found[PAM_NUM_MODULE_TYPES+1] = {0, 0, 0, 0, 0};
(void) pam_get_item(pamh, PAM_SERVICE, (void **)&service);
if (service == NULL || *service == '\0') {
__pam_log(LOG_AUTH | LOG_ERR, "No service name");
return (PAM_SYSTEM_ERR);
}
pamh->pam_conf_name[i] = strdup(config);
pam_trace(PAM_DEBUG_CONF, "read_pam_conf[%d:%s](%p) open(%s)",
i, pam_trace_cname(pamh), (void *)pamh, config);
if (open_pam_conf(&pam_fh, pamh, config) == 0) {
return (PAM_SYSTEM_ERR);
}
while ((error =
get_pam_conf_entry(pam_fh, pamh, &pamentp)) == PAM_SUCCESS &&
pamentp) {
/* See if entry is this service and valid */
if (verify_pam_conf(pamentp, service)) {
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf[%d:%s](%p): bad entry error %s",
i, pam_trace_cname(pamh), (void *)pamh, service);
error = PAM_SYSTEM_ERR;
free_pamconf(pamentp);
goto out;
}
if (strcasecmp(pamentp->pam_service, service) == 0) {
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf[%d:%s](%p): processing %s",
i, pam_trace_cname(pamh), (void *)pamh, service);
/* process first service entry */
if (service_found[pamentp->pam_type + 1] == 0) {
/* purge "other" entries */
while ((tpament = pamh->pam_conf_info[i]
[pamentp->pam_type]) != NULL) {
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): purging "
"\"other\"[%d:%s][%s]",
(void *)pamh, i,
pam_trace_cname(pamh),
pam_snames[pamentp->pam_type]);
pamh->pam_conf_info[i]
[pamentp->pam_type] = tpament->next;
free_pamconf(tpament);
}
/* add first service entry */
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): adding 1st "
"%s[%d:%s][%s]",
(void *)pamh, service, i,
pam_trace_cname(pamh),
pam_snames[pamentp->pam_type]);
pamh->pam_conf_info[i][pamentp->pam_type] =
pamentp;
service_found[pamentp->pam_type + 1] = 1;
} else {
/* append more service entries */
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): adding more "
"%s[%d:%s][%s]",
(void *)pamh, service, i,
pam_trace_cname(pamh),
pam_snames[pamentp->pam_type]);
tpament =
pamh->pam_conf_info[i][pamentp->pam_type];
while (tpament->next != NULL) {
tpament = tpament->next;
}
tpament->next = pamentp;
}
} else if (service_found[pamentp->pam_type + 1] == 0) {
/* See if "other" entry available and valid */
if (verify_pam_conf(pamentp, "other")) {
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): bad entry error %s "
"\"other\"[%d:%s]",
(void *)pamh, service, i,
pam_trace_cname(pamh));
error = PAM_SYSTEM_ERR;
free_pamconf(pamentp);
goto out;
}
if (strcasecmp(pamentp->pam_service, "other") == 0) {
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): processing "
"\"other\"[%d:%s]", (void *)pamh, i,
pam_trace_cname(pamh));
if ((tpament = pamh->pam_conf_info[i]
[pamentp->pam_type]) == NULL) {
/* add first "other" entry */
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): adding 1st "
"other[%d:%s][%s]", (void *)pamh, i,
pam_trace_cname(pamh),
pam_snames[pamentp->pam_type]);
pamh->pam_conf_info[i]
[pamentp->pam_type] = pamentp;
} else {
/* append more "other" entries */
pam_trace(PAM_DEBUG_CONF,
"read_pam_conf(%p): adding more "
"other[%d:%s][%s]", (void *)pamh, i,
pam_trace_cname(pamh),
pam_snames[pamentp->pam_type]);
while (tpament->next != NULL) {
tpament = tpament->next;
}
tpament->next = pamentp;
}
} else {
/* irrelevant entry */
free_pamconf(pamentp);
}
} else {
/* irrelevant entry */
free_pamconf(pamentp);
}
}
out:
(void) close_pam_conf(pam_fh);
if (error != PAM_SUCCESS)
free_pam_conf_info(pamh);
return (error);
}
/*
* get_pam_conf_entry - get a pam.conf entry
*/
static int
get_pam_conf_entry(struct pam_fh *pam_fh, pam_handle_t *pamh, pamtab_t **pam)
{
char *cp, *arg;
int argc;
char *tmp, *tmp_free;
int i;
char *current_line = NULL;
int error = PAM_SYSTEM_ERR; /* preset to error */
int err;
/* get the next line from pam.conf */
if ((cp = nextline(pam_fh, pamh, &err)) == NULL) {
/* no more lines in pam.conf ==> return */
error = PAM_SUCCESS;
*pam = NULL;
goto out;
}
if ((*pam = calloc(1, sizeof (pamtab_t))) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup: out of memory");
goto out;
}
/* copy full line for error reporting */
if ((current_line = strdup(cp)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup: out of memory");
goto out;
}
pam_trace(PAM_DEBUG_CONF,
"pam.conf[%s] entry:\t%s", pam_trace_cname(pamh), current_line);
/* get service name (e.g. login, su, passwd) */
if ((arg = read_next_token(&cp)) == 0) {
__pam_log(LOG_AUTH | LOG_CRIT,
"illegal pam.conf[%s] entry: %s: missing SERVICE NAME",
pam_trace_cname(pamh), current_line);
goto out;
}
if (((*pam)->pam_service = strdup(arg)) == 0) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup: out of memory");
goto out;
}
/* get module type (e.g. authentication, acct mgmt) */
if ((arg = read_next_token(&cp)) == 0) {
__pam_log(LOG_AUTH | LOG_CRIT,
"illegal pam.conf[%s] entry: %s: missing MODULE TYPE",
pam_trace_cname(pamh), current_line);
(*pam)->pam_type = -1; /* 0 is a valid value */
goto getflag;
}
if (strcasecmp(arg, PAM_AUTH_NAME) == 0) {
(*pam)->pam_type = PAM_AUTH_MODULE;
} else if (strcasecmp(arg, PAM_ACCOUNT_NAME) == 0) {
(*pam)->pam_type = PAM_ACCOUNT_MODULE;
} else if (strcasecmp(arg, PAM_SESSION_NAME) == 0) {
(*pam)->pam_type = PAM_SESSION_MODULE;
} else if (strcasecmp(arg, PAM_PASSWORD_NAME) == 0) {
(*pam)->pam_type = PAM_PASSWORD_MODULE;
} else {
/* error */
__pam_log(LOG_AUTH | LOG_CRIT,
"illegal pam.conf[%s] entry: %s: invalid module "
"type: %s", pam_trace_cname(pamh), current_line, arg);
(*pam)->pam_type = -1; /* 0 is a valid value */
}
getflag:
/* get pam flag (e.g., requisite, required, sufficient, optional) */
if ((arg = read_next_token(&cp)) == 0) {
__pam_log(LOG_AUTH | LOG_CRIT,
"illegal pam.conf[%s] entry: %s: missing CONTROL FLAG",
pam_trace_cname(pamh), current_line);
goto getpath;
}
if (strcasecmp(arg, PAM_BINDING_NAME) == 0) {
(*pam)->pam_flag = PAM_BINDING;
} else if (strcasecmp(arg, PAM_INCLUDE_NAME) == 0) {
(*pam)->pam_flag = PAM_INCLUDE;
} else if (strcasecmp(arg, PAM_OPTIONAL_NAME) == 0) {
(*pam)->pam_flag = PAM_OPTIONAL;
} else if (strcasecmp(arg, PAM_REQUIRED_NAME) == 0) {
(*pam)->pam_flag = PAM_REQUIRED;
} else if (strcasecmp(arg, PAM_REQUISITE_NAME) == 0) {
(*pam)->pam_flag = PAM_REQUISITE;
} else if (strcasecmp(arg, PAM_SUFFICIENT_NAME) == 0) {
(*pam)->pam_flag = PAM_SUFFICIENT;
} else {
/* error */
__pam_log(LOG_AUTH | LOG_CRIT,
"illegal pam.conf[%s] entry: %s",
pam_trace_cname(pamh), current_line);
__pam_log(LOG_AUTH | LOG_CRIT,
"\tinvalid control flag: %s", arg);
}
getpath:
/* get module path (e.g. /usr/lib/security/pam_unix_auth.so.1) */
if ((arg = read_next_token(&cp)) == 0) {
__pam_log(LOG_AUTH | LOG_CRIT,
"illegal pam.conf[%s] entry: %s: missing MODULE PATH",
pam_trace_cname(pamh), current_line);
error = PAM_SUCCESS; /* success */
goto out;
}
if (arg[0] != '/') {
size_t len;
/*
* If module path does not start with "/", then
* prepend PAM_LIB_DIR (/usr/lib/security/).
*/
/* sizeof (PAM_LIB_DIR) has room for '\0' */
len = sizeof (PAM_LIB_DIR) + sizeof (PAM_ISA_DIR) + strlen(arg);
if (((*pam)->module_path = malloc(len)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup: out of memory");
goto out;
}
if ((*pam)->pam_flag & PAM_INCLUDE) {
(void) snprintf((*pam)->module_path, len, "%s%s",
PAM_LIB_DIR, arg);
} else {
(void) snprintf((*pam)->module_path, len, "%s%s%s",
PAM_LIB_DIR, PAM_ISA_DIR, arg);
}
} else {
/* Full path provided for module */
char *isa;
/* Check for Instruction Set Architecture indicator */
if ((isa = strstr(arg, PAM_ISA)) != NULL) {
size_t len;
len = strlen(arg) - (sizeof (PAM_ISA)-1) +
sizeof (PAM_ISA_DIR);
/* substitute the architecture dependent path */
if (((*pam)->module_path = malloc(len)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR,
"strdup: out of memory");
goto out;
}
*isa = '\000';
isa += strlen(PAM_ISA);
(void) snprintf((*pam)->module_path, len, "%s%s%s",
arg, PAM_ISA_DIR, isa);
} else if (((*pam)->module_path = strdup(arg)) == 0) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup: out of memory");
goto out;
}
}
/* count the number of module-specific options first */
argc = 0;
if ((tmp = strdup(cp)) == NULL) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup: out of memory");
goto out;
}
tmp_free = tmp;
for (arg = read_next_token(&tmp); arg; arg = read_next_token(&tmp))
argc++;
free(tmp_free);
/* allocate array for the module-specific options */
if (argc > 0) {
if (((*pam)->module_argv =
calloc(argc+1, sizeof (char *))) == 0) {
__pam_log(LOG_AUTH | LOG_ERR, "calloc: out of memory");
goto out;
}
i = 0;
for (arg = read_next_token(&cp); arg;
arg = read_next_token(&cp)) {
(*pam)->module_argv[i] = strdup(arg);
if ((*pam)->module_argv[i] == NULL) {
__pam_log(LOG_AUTH | LOG_ERR, "strdup failed");
goto out;
}
i++;
}
(*pam)->module_argv[argc] = NULL;
}
(*pam)->module_argc = argc;
error = PAM_SUCCESS; /* success */
(*pam)->pam_err = err; /* was the line truncated */
out:
if (current_line)
free(current_line);
if (error != PAM_SUCCESS) {
/* on error free this */
if (*pam)
free_pamconf(*pam);
}
return (error);
}
/*
* read_next_token - skip tab and space characters and return the next token
*/
static char *
read_next_token(char **cpp)
{
register char *cp = *cpp;
char *start;
if (cp == (char *)0) {
*cpp = (char *)0;
return ((char *)0);
}
while (*cp == ' ' || *cp == '\t')
cp++;
if (*cp == '\0') {
*cpp = (char *)0;
return ((char *)0);
}
start = cp;
while (*cp && *cp != ' ' && *cp != '\t')
cp++;
if (*cp != '\0')
*cp++ = '\0';
*cpp = cp;
return (start);
}
static char *
pam_conf_strnchr(char *sp, int c, intptr_t count)
{
while (count) {
if (*sp == (char)c)
return ((char *)sp);
else {
sp++;
count--;
}
};
return (NULL);
}
/*
* nextline - skip all blank lines and comments
*/
static char *
nextline(struct pam_fh *pam_fh, pam_handle_t *pamh, int *err)
{
char *ll;
int find_a_line = 0;
char *data = pam_fh->data;
char *bufferp = pam_fh->bufferp;
char *bufferendp = &data[pam_fh->bufsize];
size_t input_len;
/*
* Skip the blank line, comment line
*/
while (!find_a_line) {
/* if we are at the end of the buffer, there is no next line */
if (bufferp == bufferendp)
return (NULL);
/* skip blank line */
while (*bufferp == '\n') {
/*
* If we are at the end of the buffer, there is
* no next line.
*/
if (++bufferp == bufferendp) {
return (NULL);
}
/* else we check *bufferp again */
}
/* skip comment line */
while (*bufferp == '#') {
if ((ll = pam_conf_strnchr(bufferp, '\n',
bufferendp - bufferp)) != NULL) {
bufferp = ll;
} else {
/*
* this comment line the last line.
* no next line
*/
return (NULL);
}
/*
* If we are at the end of the buffer, there is
* no next line.
*/
if (bufferp == bufferendp) {
return (NULL);
}
}
if ((*bufferp != '\n') && (*bufferp != '#')) {
find_a_line = 1;
}
}
*err = PAM_SUCCESS;
/* now we find one line */
if ((ll = pam_conf_strnchr(bufferp, '\n', bufferendp - bufferp))
!= NULL) {
if ((input_len = ll - bufferp) >= sizeof (pam_fh->line)) {
__pam_log(LOG_AUTH | LOG_ERR,
"nextline[%d:%s]: pam.conf line too long %.256s",
pamh->include_depth, pam_trace_cname(pamh),
bufferp);
input_len = sizeof (pam_fh->line) - 1;
*err = PAM_SERVICE_ERR;
}
(void) strncpy(pam_fh->line, bufferp, input_len);
pam_fh->line[input_len] = '\0';
pam_fh->bufferp = ll++;
} else {
ll = bufferendp;
if ((input_len = ll - bufferp) >= sizeof (pam_fh->line)) {
__pam_log(LOG_AUTH | LOG_ERR,
"nextline[%d:%s]: pam.conf line too long %.256s",
pamh->include_depth, pam_trace_cname(pamh),
bufferp);
input_len = sizeof (pam_fh->line) - 1;
*err = PAM_SERVICE_ERR;
}
(void) strncpy(pam_fh->line, bufferp, input_len);
pam_fh->line[input_len] = '\0';
pam_fh->bufferp = ll;
}
return (pam_fh->line);
}
/*
* verify_pam_conf - verify that the pam_conf entry is filled in.
*
* True = Error if there is no service.
* True = Error if there is a service and it matches the requested service
* but, the type, flag, line overflow, or path is in error.
*/
static int
verify_pam_conf(pamtab_t *pam, char *service)
{
return ((pam->pam_service == (char *)NULL) ||
((strcasecmp(pam->pam_service, service) == 0) &&
((pam->pam_type == -1) ||
(pam->pam_flag == 0) ||
(pam->pam_err != PAM_SUCCESS) ||
(pam->module_path == (char *)NULL))));
}
/*
* Routines to free allocated storage
*/
/*
* clean_up - free allocated storage in the pam handle
*/
static void
clean_up(pam_handle_t *pamh)
{
int i;
pam_repository_t *auth_rep;
if (pamh) {
while (pamh->include_depth >= 0) {
free_pam_conf_info(pamh);
pamh->include_depth--;
}
/* Cleanup PAM_REPOSITORY structure */
auth_rep = pamh->ps_item[PAM_REPOSITORY].pi_addr;
if (auth_rep != NULL) {
if (auth_rep->type != NULL)
free(auth_rep->type);
if (auth_rep->scope != NULL)
free(auth_rep->scope);
}
for (i = 0; i < PAM_MAX_ITEMS; i++) {
if (pamh->ps_item[i].pi_addr != NULL) {
if (i == PAM_AUTHTOK || i == PAM_OLDAUTHTOK) {
(void) memset(pamh->ps_item[i].pi_addr,
0, pamh->ps_item[i].pi_size);
}
free(pamh->ps_item[i].pi_addr);
}
}
free(pamh);
}
}
/*
* free_pamconf - free memory used to store pam.conf entry
*/
static void
free_pamconf(pamtab_t *cp)
{
int i;
if (cp) {
if (cp->pam_service)
free(cp->pam_service);
if (cp->module_path)
free(cp->module_path);
for (i = 0; i < cp->module_argc; i++) {
if (cp->module_argv[i])
free(cp->module_argv[i]);
}
if (cp->module_argc > 0)
free(cp->module_argv);
if (cp->function_ptr)
free(cp->function_ptr);
free(cp);
}
}
/*
* free_pam_conf_info - free memory used to store all pam.conf info
* under the pam handle
*/
static void
free_pam_conf_info(pam_handle_t *pamh)
{
pamtab_t *pamentp;
pamtab_t *pament_trail;
int i = pamh->include_depth;
int j;
for (j = 0; j < PAM_NUM_MODULE_TYPES; j++) {
pamentp = pamh->pam_conf_info[i][j];
pamh->pam_conf_info[i][j] = NULL;
pament_trail = pamentp;
while (pamentp) {
pamentp = pamentp->next;
free_pamconf(pament_trail);
pament_trail = pamentp;
}
}
if (pamh->pam_conf_name[i] != NULL) {
free(pamh->pam_conf_name[i]);
pamh->pam_conf_name[i] = NULL;
}
}
static void
free_env(env_list *pam_env)
{
if (pam_env) {
if (pam_env->name)
free(pam_env->name);
if (pam_env->value)
free(pam_env->value);
free(pam_env);
}
}
/*
* Internal convenience functions for Solaris PAM service modules.
*/
#include <libintl.h>
#include <nl_types.h>
#include <synch.h>
#include <locale.h>
#include <thread.h>
typedef struct pam_msg_data {
nl_catd fd;
} pam_msg_data_t;
/*
* free_resp():
* free storage for responses used in the call back "pam_conv" functions
*/
void
free_resp(int num_msg, struct pam_response *resp)
{
int i;
struct pam_response *r;
if (resp) {
r = resp;
for (i = 0; i < num_msg; i++, r++) {
if (r->resp) {
/* clear before freeing -- may be a password */
bzero(r->resp, strlen(r->resp));
free(r->resp);
r->resp = NULL;
}
}
free(resp);
}
}
static int
do_conv(pam_handle_t *pamh, int msg_style, int num_msg,
char messages[PAM_MAX_NUM_MSG][PAM_MAX_MSG_SIZE], void *conv_apdp,
struct pam_response *ret_respp[])
{
struct pam_message *msg;
struct pam_message *m;
int i;
int k;
int retcode;
struct pam_conv *pam_convp;
if ((retcode = pam_get_item(pamh, PAM_CONV,
(void **)&pam_convp)) != PAM_SUCCESS) {
return (retcode);
}
/*
* When pam_set_item() is called to set PAM_CONV and the
* item is NULL, memset(pip->pi_addr, 0, size) is called.
* So at this point, we should check whether pam_convp->conv
* is NULL or not.
*/
if ((pam_convp == NULL) || (pam_convp->conv == NULL))
return (PAM_SYSTEM_ERR);
i = 0;
k = num_msg;
msg = calloc(num_msg, sizeof (struct pam_message));
if (msg == NULL) {
return (PAM_BUF_ERR);
}
m = msg;
while (k--) {
/*
* fill out the message structure to display prompt message
*/
m->msg_style = msg_style;
m->msg = messages[i];
pam_trace(PAM_DEBUG_CONV,
"pam_conv_msg(%p:%d[%d]=%s)",
(void *)pamh, msg_style, i, messages[i]);
m++;
i++;
}
/*
* The UNIX pam modules always calls __pam_get_authtok() and
* __pam_display_msg() with a NULL pointer as the conv_apdp.
* In case the conv_apdp is NULL and the pam_convp->appdata_ptr
* is not NULL, we should pass the pam_convp->appdata_ptr
* to the conversation function.
*/
if (conv_apdp == NULL && pam_convp->appdata_ptr != NULL)
conv_apdp = pam_convp->appdata_ptr;
/*
* Call conv function to display the prompt.
*/
retcode = (pam_convp->conv)(num_msg, &msg, ret_respp, conv_apdp);
pam_trace(PAM_DEBUG_CONV,
"pam_conv_resp(%p pam_conv = %s) ret_respp = %p",
(void *)pamh, pam_strerror(pamh, retcode), (void *)ret_respp);
if (*ret_respp == NULL) {
pam_trace(PAM_DEBUG_CONV,
"pam_conv_resp(%p No response requested)", (void *)pamh);
} else if ((pam_debug & (PAM_DEBUG_CONV | PAM_DEBUG_AUTHTOK)) != 0) {
struct pam_response *r = *ret_respp;
for (i = 0; i < num_msg; i++, r++) {
if (r->resp == NULL) {
pam_trace(PAM_DEBUG_CONV,
"pam_conv_resp(%p:"
"[%d] NULL response string)",
(void *)pamh, i);
} else {
if (msg_style == PAM_PROMPT_ECHO_OFF) {
#ifdef DEBUG
pam_trace(PAM_DEBUG_AUTHTOK,
"pam_conv_resp(%p:[%d]=%s, "
"code=%d)",
(void *)pamh, i, r->resp,
r->resp_retcode);
#endif /* DEBUG */
pam_trace(PAM_DEBUG_CONV,
"pam_conv_resp(%p:[%d] len=%lu, "
"code=%d)",
(void *)pamh, i,
(ulong_t)strlen(r->resp),
r->resp_retcode);
} else {
pam_trace(PAM_DEBUG_CONV,
"pam_conv_resp(%p:[%d]=%s, "
"code=%d)",
(void *)pamh, i, r->resp,
r->resp_retcode);
}
}
}
}
if (msg)
free(msg);
return (retcode);
}
/*
* __pam_display_msg():
* display message by calling the call back functions
* provided by the application through "pam_conv" structure
*/
int
__pam_display_msg(pam_handle_t *pamh, int msg_style, int num_msg,
char messages[PAM_MAX_NUM_MSG][PAM_MAX_MSG_SIZE], void *conv_apdp)
{
struct pam_response *ret_respp = NULL;
int ret;
ret = do_conv(pamh, msg_style, num_msg, messages,
conv_apdp, &ret_respp);
if (ret_respp != NULL)
free_resp(num_msg, ret_respp);
return (ret);
}
/*
* __pam_get_authtok()
* retrieves a password of at most PASS_MAX length from the pam
* handle (pam_get_item) or from the input stream (do_conv).
*
* This function allocates memory for the new authtok.
* Applications calling this function are responsible for
* freeing this memory.
*
* If "source" is
* PAM_HANDLE
* and "type" is:
* PAM_AUTHTOK - password is taken from pam handle (PAM_AUTHTOK)
* PAM_OLDAUTHTOK - password is taken from pam handle (PAM_OLDAUTHTOK)
*
* If "source" is
* PAM_PROMPT
* and "type" is:
* 0: Prompt for new passwd, do not even attempt
* to store it in the pam handle.
* PAM_AUTHTOK: Prompt for new passwd, store in pam handle as
* PAM_AUTHTOK item if this value is not already set.
* PAM_OLDAUTHTOK: Prompt for new passwd, store in pam handle as
* PAM_OLDAUTHTOK item if this value is not
* already set.
*/
int
__pam_get_authtok(pam_handle_t *pamh, int source, int type, char *prompt,
char **authtok)
{
int error = PAM_SYSTEM_ERR;
char *new_password = NULL;
struct pam_response *ret_resp = NULL;
char messages[PAM_MAX_NUM_MSG][PAM_MAX_MSG_SIZE];
if ((*authtok = calloc(PASS_MAX+1, sizeof (char))) == NULL)
return (PAM_BUF_ERR);
if (prompt == NULL)
prompt = dgettext(TEXT_DOMAIN, "password: ");
switch (source) {
case PAM_HANDLE:
/* get password from pam handle item list */
switch (type) {
case PAM_AUTHTOK:
case PAM_OLDAUTHTOK:
if ((error = pam_get_item(pamh, type,
(void **)&new_password)) != PAM_SUCCESS)
goto err_ret;
if (new_password == NULL || new_password[0] == '\0') {
free(*authtok);
*authtok = NULL;
} else {
(void) strlcpy(*authtok, new_password,
PASS_MAX+1);
}
break;
default:
__pam_log(LOG_AUTH | LOG_ERR,
"__pam_get_authtok() invalid type: %d", type);
error = PAM_SYMBOL_ERR;
goto err_ret;
}
break;
case PAM_PROMPT:
/*
* Prompt for new password and save in pam handle item list
* if the that item is not already set.
*/
(void) strncpy(messages[0], prompt, sizeof (messages[0]));
if ((error = do_conv(pamh, PAM_PROMPT_ECHO_OFF, 1, messages,
NULL, &ret_resp)) != PAM_SUCCESS)
goto err_ret;
if (ret_resp->resp == NULL) {
/* getpass didn't return anything */
error = PAM_SYSTEM_ERR;
goto err_ret;
}
/* save the new password if this item was NULL */
if (type) {
if ((error = pam_get_item(pamh, type,
(void **)&new_password)) != PAM_SUCCESS) {
free_resp(1, ret_resp);
goto err_ret;
}
if (new_password == NULL)
(void) pam_set_item(pamh, type, ret_resp->resp);
}
(void) strlcpy(*authtok, ret_resp->resp, PASS_MAX+1);
free_resp(1, ret_resp);
break;
default:
__pam_log(LOG_AUTH | LOG_ERR,
"__pam_get_authtok() invalid source: %d", source);
error = PAM_SYMBOL_ERR;
goto err_ret;
}
return (PAM_SUCCESS);
err_ret:
bzero(*authtok, PASS_MAX+1);
free(*authtok);
*authtok = NULL;
return (error);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4424_0 |
crossvul-cpp_data_good_998_1 | /*
* Marvell Wireless LAN device driver: AP specific command handling
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
#include "11ac.h"
#include "11n.h"
/* This function parses security related parameters from cfg80211_ap_settings
* and sets into FW understandable bss_config structure.
*/
int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params) {
int i;
struct mwifiex_wep_key wep_key;
if (!params->privacy) {
bss_config->protocol = PROTOCOL_NO_SECURITY;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
priv->sec_info.wep_enabled = 0;
priv->sec_info.wpa_enabled = 0;
priv->sec_info.wpa2_enabled = 0;
return 0;
}
switch (params->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
bss_config->auth_mode = WLAN_AUTH_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
bss_config->auth_mode = WLAN_AUTH_LEAP;
break;
default:
bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
break;
}
bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
for (i = 0; i < params->crypto.n_akm_suites; i++) {
switch (params->crypto.akm_suites[i]) {
case WLAN_AKM_SUITE_8021X:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
break;
case WLAN_AKM_SUITE_PSK:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
break;
default:
break;
}
}
for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
switch (params->crypto.ciphers_pairwise[i]) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
break;
case WLAN_CIPHER_SUITE_TKIP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_TKIP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_AES_CCMP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
default:
break;
}
}
switch (params->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (priv->sec_info.wep_enabled) {
bss_config->protocol = PROTOCOL_STATIC_WEP;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
for (i = 0; i < NUM_WEP_KEYS; i++) {
wep_key = priv->wep_key[i];
bss_config->wep_cfg[i].key_index = i;
if (priv->wep_key_curr_index == i)
bss_config->wep_cfg[i].is_default = 1;
else
bss_config->wep_cfg[i].is_default = 0;
bss_config->wep_cfg[i].length =
wep_key.key_length;
memcpy(&bss_config->wep_cfg[i].key,
&wep_key.key_material,
wep_key.key_length);
}
}
break;
case WLAN_CIPHER_SUITE_TKIP:
bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
break;
default:
break;
}
return 0;
}
/* This function updates 11n related parameters from IE and sets them into
* bss_config structure.
*/
void
mwifiex_set_ht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *ht_ie;
if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
return;
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap));
bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_vht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vht_ie;
vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (vht_ie) {
memcpy(&bss_cfg->vht_cap, vht_ie + 2,
sizeof(struct ieee80211_vht_cap));
priv->ap_11ac_enabled = 1;
} else {
priv->ap_11ac_enabled = 0;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_tpc_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *tpc_ie;
tpc_ie = cfg80211_find_ie(WLAN_EID_TPC_REQUEST, params->beacon.tail,
params->beacon.tail_len);
if (tpc_ie)
bss_cfg->power_constraint = *(tpc_ie + 2);
else
bss_cfg->power_constraint = 0;
}
/* Enable VHT only when cfg80211_ap_settings has VHT IE.
* Otherwise disable VHT.
*/
void mwifiex_set_vht_width(struct mwifiex_private *priv,
enum nl80211_chan_width width,
bool ap_11ac_enable)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_11ac_vht_cfg vht_cfg;
vht_cfg.band_config = VHT_CFG_5GHZ;
vht_cfg.cap_info = adapter->hw_dot_11ac_dev_cap;
if (!ap_11ac_enable) {
vht_cfg.mcs_tx_set = DISABLE_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DISABLE_VHT_MCS_SET;
} else {
vht_cfg.mcs_tx_set = DEFAULT_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DEFAULT_VHT_MCS_SET;
}
vht_cfg.misc_config = VHT_CAP_UAP_ONLY;
if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
vht_cfg.misc_config |= VHT_BW_80_160_80P80;
mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
return;
}
/* This function finds supported rates IE from beacon parameter and sets
* these rates into bss_config structure.
*/
void
mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
struct ieee_types_header *rate_ie;
int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u8 *var_pos = params->beacon.head + var_offset;
int len = params->beacon.head_len - var_offset;
u8 rate_len = 0;
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
if (rate_ie) {
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
}
return;
}
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
* be get updated to valid ones at later stage.
*/
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
{
config->bcast_ssid_ctl = 0x7F;
config->radio_ctl = 0x7F;
config->dtim_period = 0x7F;
config->beacon_period = 0x7FFF;
config->auth_mode = 0x7F;
config->rts_threshold = 0x7FFF;
config->frag_threshold = 0x7FFF;
config->retry_limit = 0x7F;
config->qos_info = 0xFF;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WPA/WPA2 security.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_pwk_cipher *pwk_cipher;
struct host_cmd_tlv_gwk_cipher *gwk_cipher;
struct host_cmd_tlv_passphrase *passphrase;
struct host_cmd_tlv_akmp *tlv_akmp;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
u16 cmd_size = *param_size;
u8 *tlv = *tlv_buf;
tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
sizeof(struct mwifiex_ie_types_header));
tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
cmd_size += sizeof(struct host_cmd_tlv_akmp);
tlv += sizeof(struct host_cmd_tlv_akmp);
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
gwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
}
if (bss_cfg->wpa_cfg.length) {
passphrase = (struct host_cmd_tlv_passphrase *)tlv;
passphrase->header.type =
cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
bss_cfg->wpa_cfg.length);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function parses WMM related parameters from cfg80211_ap_settings
* structure and updates bss_config structure.
*/
void
mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vendor_ie;
const u8 *wmm_ie;
u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
params->beacon.tail,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
return;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
} else {
memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui));
bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE;
bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION;
priv->wmm_enabled = 0;
}
bss_cfg->qos_info = 0x00;
return;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WEP encryption.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_wep_key *wep_key;
u16 cmd_size = *param_size;
int i;
u8 *tlv = *tlv_buf;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
for (i = 0; i < NUM_WEP_KEYS; i++) {
if (bss_cfg->wep_cfg[i].length &&
(bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
wep_key = (struct host_cmd_tlv_wep_key *)tlv;
wep_key->header.type =
cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
wep_key->header.len =
cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
bss_cfg->wep_cfg[i].length);
cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
}
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function enable 11D if userspace set the country IE.
*/
void mwifiex_config_uap_11d(struct mwifiex_private *priv,
struct cfg80211_beacon_data *beacon_data)
{
enum state_11d_t state_11d;
const u8 *country_ie;
country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_data->tail,
beacon_data->tail_len);
if (country_ie) {
/* Send cmd to FW to enable 11D function */
state_11d = ENABLE_11D;
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
HostCmd_ACT_GEN_SET, DOT11D_I,
&state_11d, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"11D: failed to enable 11D\n");
}
}
}
/* This function parses BSS related parameters from structure
* and prepares TLVs. These TLVs are appended to command buffer.
*/
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
struct host_cmd_tlv_bcast_ssid *bcast_ssid;
struct host_cmd_tlv_channel_band *chan_band;
struct host_cmd_tlv_frag_threshold *frag_threshold;
struct host_cmd_tlv_rts_threshold *rts_threshold;
struct host_cmd_tlv_retry_limit *retry_limit;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
struct host_cmd_tlv_rates *tlv_rates;
struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
struct host_cmd_tlv_power_constraint *pwr_ct;
struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_ie_types_wmmcap *wmm_cap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
int i;
u16 cmd_size = *param_size;
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
bcast_ssid->header.len =
cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
}
if (bss_cfg->rates[0]) {
tlv_rates = (struct host_cmd_tlv_rates *)tlv;
tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
i++)
tlv_rates->rates[i] = bss_cfg->rates[i];
tlv_rates->header.len = cpu_to_le16(i);
cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
if (bss_cfg->channel &&
(((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG &&
bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
chan_band->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
sizeof(struct mwifiex_ie_types_header));
chan_band->band_config = bss_cfg->band_cfg;
chan_band->channel = bss_cfg->channel;
cmd_size += sizeof(struct host_cmd_tlv_channel_band);
tlv += sizeof(struct host_cmd_tlv_channel_band);
}
if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
beacon_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
beacon_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
sizeof(struct mwifiex_ie_types_header));
beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
tlv += sizeof(struct host_cmd_tlv_beacon_period);
}
if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
dtim_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
dtim_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
sizeof(struct mwifiex_ie_types_header));
dtim_period->period = bss_cfg->dtim_period;
cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
tlv += sizeof(struct host_cmd_tlv_dtim_period);
}
if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
rts_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
rts_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
sizeof(struct mwifiex_ie_types_header));
rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
(bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
frag_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
frag_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
sizeof(struct mwifiex_ie_types_header));
frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
retry_limit->header.type =
cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
retry_limit->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
sizeof(struct mwifiex_ie_types_header));
retry_limit->limit = (u8)bss_cfg->retry_limit;
cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
tlv += sizeof(struct host_cmd_tlv_retry_limit);
}
if ((bss_cfg->protocol & PROTOCOL_WPA) ||
(bss_cfg->protocol & PROTOCOL_WPA2) ||
(bss_cfg->protocol & PROTOCOL_EAP))
mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
else
mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
auth_type->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
sizeof(struct mwifiex_ie_types_header));
auth_type->auth_type = (u8)bss_cfg->auth_mode;
cmd_size += sizeof(struct host_cmd_tlv_auth_type);
tlv += sizeof(struct host_cmd_tlv_auth_type);
}
if (bss_cfg->protocol) {
encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
encrypt_protocol->header.type =
cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
encrypt_protocol->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
- sizeof(struct mwifiex_ie_types_header));
encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
}
if (bss_cfg->ht_cap.cap_info) {
htcap = (struct mwifiex_ie_types_htcap *)tlv;
htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
htcap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
htcap->ht_cap.ampdu_params_info =
bss_cfg->ht_cap.ampdu_params_info;
memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
sizeof(struct ieee80211_mcs_info));
htcap->ht_cap.extended_ht_cap_info =
bss_cfg->ht_cap.extended_ht_cap_info;
htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
htcap->ht_cap.antenna_selection_info =
bss_cfg->ht_cap.antenna_selection_info;
cmd_size += sizeof(struct mwifiex_ie_types_htcap);
tlv += sizeof(struct mwifiex_ie_types_htcap);
}
if (bss_cfg->wmm_info.qos_info != 0xFF) {
wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv;
wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC);
wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info));
memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info,
sizeof(wmm_cap->wmm_info));
cmd_size += sizeof(struct mwifiex_ie_types_wmmcap);
tlv += sizeof(struct mwifiex_ie_types_wmmcap);
}
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
cmd_size += sizeof(*ao_timer);
tlv += sizeof(*ao_timer);
}
if (bss_cfg->power_constraint) {
pwr_ct = (void *)tlv;
pwr_ct->header.type = cpu_to_le16(TLV_TYPE_PWR_CONSTRAINT);
pwr_ct->header.len = cpu_to_le16(sizeof(u8));
pwr_ct->constraint = bss_cfg->power_constraint;
cmd_size += sizeof(*pwr_ct);
tlv += sizeof(*pwr_ct);
}
if (bss_cfg->ps_sta_ao_timer) {
ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ps_ao_timer->header.type =
cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ps_ao_timer->sta_ao_timer =
cpu_to_le32(bss_cfg->ps_sta_ao_timer);
cmd_size += sizeof(*ps_ao_timer);
tlv += sizeof(*ps_ao_timer);
}
*param_size = cmd_size;
return 0;
}
/* This function parses custom IEs from IE list and prepares command buffer */
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
struct mwifiex_ie_list *ap_ie = cmd_buf;
struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
if (!ap_ie || !ap_ie->len)
return -1;
*ie_size += le16_to_cpu(ap_ie->len) +
sizeof(struct mwifiex_ie_types_header);
tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
tlv_ie->len = ap_ie->len;
tlv += sizeof(struct mwifiex_ie_types_header);
memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
return 0;
}
/* Parse AP config structure and prepare TLV based command structure
* to be sent to FW for uAP configuration
*/
static int
mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
u32 type, void *cmd_buf)
{
u8 *tlv;
u16 cmd_size, param_size, ie_size;
struct host_cmd_ds_sys_config *sys_cfg;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
sys_cfg->action = cpu_to_le16(cmd_action);
tlv = sys_cfg->tlv;
switch (type) {
case UAP_BSS_PARAMS_I:
param_size = cmd_size;
if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, ¶m_size))
return -1;
cmd->size = cpu_to_le16(param_size);
break;
case UAP_CUSTOM_IE_I:
ie_size = cmd_size;
if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
return -1;
cmd->size = cpu_to_le16(ie_size);
break;
default:
return -1;
}
return 0;
}
/* This function prepares AP specific deauth command with mac supplied in
* function parameter.
*/
static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u8 *mac)
{
struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
memcpy(sta_deauth->mac, mac, ETH_ALEN);
sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
S_DS_GEN);
return 0;
}
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
* routines based upon the command number.
*/
int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
u16 cmd_action, u32 type,
void *data_buf, void *cmd_buf)
{
struct host_cmd_ds_command *cmd = cmd_buf;
switch (cmd_no) {
case HostCmd_CMD_UAP_SYS_CONFIG:
if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
cmd->command = cpu_to_le16(cmd_no);
cmd->size = cpu_to_le16(S_DS_GEN);
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
return -1;
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
data_buf))
return -1;
break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
return 0;
}
void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef)
{
u8 config_bands = 0, old_bands = priv->adapter->config_bands;
priv->bss_chandef = chandef;
bss_cfg->channel = ieee80211_frequency_to_channel(
chandef.chan->center_freq);
/* Set appropriate bands */
if (chandef.chan->band == NL80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
config_bands = BAND_B | BAND_G;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_AN;
if (chandef.width > NL80211_CHAN_WIDTH_40)
config_bands |= BAND_AAC;
}
switch (chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
break;
case NL80211_CHAN_WIDTH_40:
if (chandef.center_freq1 < chandef.chan->center_freq)
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW;
else
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE;
break;
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
bss_cfg->band_cfg |=
mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4;
break;
default:
mwifiex_dbg(priv->adapter,
WARN, "Unknown channel width: %d\n",
chandef.width);
break;
}
priv->adapter->config_bands = config_bands;
if (old_bands != config_bands) {
mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
mwifiex_dnld_txpwr_table(priv);
}
}
int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg)
{
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to set AP configuration\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to start the BSS\n");
return -1;
}
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, true))
return -1;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_998_1 |
crossvul-cpp_data_good_3995_0 | // SPDX-License-Identifier: ISC
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
u32 ring_base)
{
int size;
int i;
spin_lock_init(&q->lock);
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
/* clear descriptors */
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
writel(q->desc_dma, &q->regs->desc_base);
writel(0, &q->regs->cpu_idx);
writel(0, &q->regs->dma_idx);
writel(q->ndesc, &q->regs->ring_size);
return 0;
}
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi)
{
struct mt76_desc *desc;
u32 ctrl;
int i, idx = -1;
if (txwi) {
q->entry[q->head].txwi = DMA_DUMMY_DATA;
q->entry[q->head].skip_buf0 = true;
}
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
if (i < nbufs - 1) {
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
}
if (i == nbufs - 1)
ctrl |= MT_DMA_CTL_LAST_SEC0;
else if (i == nbufs - 2)
ctrl |= MT_DMA_CTL_LAST_SEC1;
idx = q->head;
q->head = (q->head + 1) % q->ndesc;
desc = &q->desc[idx];
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->info, cpu_to_le32(info));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
q->queued++;
}
q->entry[idx].txwi = txwi;
q->entry[idx].skb = skb;
return idx;
}
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *prev_e)
{
struct mt76_queue_entry *e = &q->entry[idx];
__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
u32 ctrl = le32_to_cpu(__ctrl);
if (!e->skip_buf0) {
__le32 addr = READ_ONCE(q->desc[idx].buf0);
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
DMA_TO_DEVICE);
}
if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
__le32 addr = READ_ONCE(q->desc[idx].buf1);
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
DMA_TO_DEVICE);
}
if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
if (e->skb == DMA_DUMMY_DATA)
e->skb = NULL;
*prev_e = *e;
memset(e, 0, sizeof(*e));
}
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
writel(q->desc_dma, &q->regs->desc_base);
writel(q->ndesc, &q->regs->ring_size);
q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
unsigned int n_swq_queued[4] = {};
unsigned int n_queued = 0;
bool wake = false;
int i, last;
if (!q)
return;
if (flush)
last = -1;
else
last = readl(&q->regs->dma_idx);
while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
n_queued++;
if (entry.skb)
dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
}
spin_lock_bh(&q->lock);
q->queued -= n_queued;
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
if (flush)
mt76_dma_sync_idx(dev, q);
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
spin_unlock_bh(&q->lock);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
}
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
int *len, u32 *info, bool *more)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
dma_addr_t buf_addr;
void *buf = e->buf;
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
if (len) {
u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
}
if (info)
*info = le32_to_cpu(desc->info);
dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
e->buf = NULL;
return buf;
}
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more)
{
int idx = q->tail;
*more = false;
if (!q->queued)
return NULL;
if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
writel(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_tx_info tx_info = {
.skb = skb,
};
int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
struct sk_buff *iter;
dma_addr_t addr;
u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM;
}
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
goto free;
tx_info.buf[n].addr = t->dma_addr;
tx_info.buf[n++].len = dev->drv->txwi_size;
tx_info.buf[n].addr = addr;
tx_info.buf[n++].len = len;
skb_walk_frags(skb, iter) {
if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
goto unmap;
tx_info.buf[n].addr = addr;
tx_info.buf[n++].len = iter->len;
}
tx_info.nbuf = n;
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
goto unmap;
if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
ret = -ENOMEM;
goto unmap;
}
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
tx_info.info, tx_info.skb, t);
unmap:
for (n--; n > 0; n--)
dma_unmap_single(dev->dev, tx_info.buf[n].addr,
tx_info.buf[n].len, DMA_TO_DEVICE);
free:
e.skb = tx_info.skb;
e.txwi = t;
dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
dma_addr_t addr;
void *buf;
int frames = 0;
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf;
buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
if (!buf)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr))) {
skb_free_frag(buf);
break;
}
qbuf.addr = addr + offset;
qbuf.len = len - offset;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
if (frames)
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
return frames;
}
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
struct page *page;
void *buf;
bool more;
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
if (!buf)
break;
skb_free_frag(buf);
} while (1);
spin_unlock_bh(&q->lock);
if (!q->rx_page.va)
return;
page = virt_to_page(q->rx_page.va);
__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
memset(&q->rx_page, 0, sizeof(q->rx_page));
}
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
struct mt76_queue *q = &dev->q_rx[qid];
int i;
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
mt76_dma_rx_fill(dev, q);
if (!q->rx_head)
return;
dev_kfree_skb(q->rx_head);
q->rx_head = NULL;
}
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
struct page *page = virt_to_head_page(data);
int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
offset += q->buf_offset;
skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
q->buf_size);
}
if (more)
return;
q->rx_head = NULL;
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
int len, data_len, done = 0;
struct sk_buff *skb;
unsigned char *data;
bool more;
while (done < budget) {
u32 info;
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
if (!data)
break;
if (q->rx_head)
data_len = q->buf_size;
else
data_len = SKB_WITH_OVERHEAD(q->buf_size);
if (data_len < len + q->buf_offset) {
dev_kfree_skb(q->rx_head);
q->rx_head = NULL;
skb_free_frag(data);
continue;
}
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more);
continue;
}
skb = build_skb(data, q->buf_size);
if (!skb) {
skb_free_frag(data);
continue;
}
skb_reserve(skb, q->buf_offset);
if (q == &dev->q_rx[MT_RXQ_MCU]) {
u32 *rxfce = (u32 *)skb->cb;
*rxfce = info;
}
__skb_put(skb, len);
done++;
if (more) {
q->rx_head = skb;
continue;
}
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
mt76_dma_rx_fill(dev, q);
return done;
}
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
struct mt76_dev *dev;
int qid, done = 0, cur;
dev = container_of(napi->dev, struct mt76_dev, napi_dev);
qid = napi - dev->napi;
rcu_read_lock();
do {
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
mt76_rx_poll_complete(dev, qid, napi);
done += cur;
} while (cur && done < budget);
rcu_read_unlock();
if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
return done;
}
static int
mt76_dma_init(struct mt76_dev *dev)
{
int i;
init_dummy_netdev(&dev->napi_dev);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
return 0;
}
static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
};
void mt76_dma_attach(struct mt76_dev *dev)
{
dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_3995_0 |
crossvul-cpp_data_bad_999_1 | /*
* Marvell Wireless LAN device driver: AP specific command handling
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
#include "11ac.h"
#include "11n.h"
/* This function parses security related parameters from cfg80211_ap_settings
* and sets into FW understandable bss_config structure.
*/
int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params) {
int i;
struct mwifiex_wep_key wep_key;
if (!params->privacy) {
bss_config->protocol = PROTOCOL_NO_SECURITY;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
priv->sec_info.wep_enabled = 0;
priv->sec_info.wpa_enabled = 0;
priv->sec_info.wpa2_enabled = 0;
return 0;
}
switch (params->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
bss_config->auth_mode = WLAN_AUTH_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
bss_config->auth_mode = WLAN_AUTH_LEAP;
break;
default:
bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
break;
}
bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
for (i = 0; i < params->crypto.n_akm_suites; i++) {
switch (params->crypto.akm_suites[i]) {
case WLAN_AKM_SUITE_8021X:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
break;
case WLAN_AKM_SUITE_PSK:
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_1) {
bss_config->protocol = PROTOCOL_WPA;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
break;
default:
break;
}
}
for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
switch (params->crypto.ciphers_pairwise[i]) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
break;
case WLAN_CIPHER_SUITE_TKIP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_TKIP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
bss_config->wpa_cfg.pairwise_cipher_wpa |=
CIPHER_AES_CCMP;
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
default:
break;
}
}
switch (params->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (priv->sec_info.wep_enabled) {
bss_config->protocol = PROTOCOL_STATIC_WEP;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
for (i = 0; i < NUM_WEP_KEYS; i++) {
wep_key = priv->wep_key[i];
bss_config->wep_cfg[i].key_index = i;
if (priv->wep_key_curr_index == i)
bss_config->wep_cfg[i].is_default = 1;
else
bss_config->wep_cfg[i].is_default = 0;
bss_config->wep_cfg[i].length =
wep_key.key_length;
memcpy(&bss_config->wep_cfg[i].key,
&wep_key.key_material,
wep_key.key_length);
}
}
break;
case WLAN_CIPHER_SUITE_TKIP:
bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
break;
default:
break;
}
return 0;
}
/* This function updates 11n related parameters from IE and sets them into
* bss_config structure.
*/
void
mwifiex_set_ht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *ht_ie;
if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
return;
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap));
bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_vht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vht_ie;
vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (vht_ie) {
memcpy(&bss_cfg->vht_cap, vht_ie + 2,
sizeof(struct ieee80211_vht_cap));
priv->ap_11ac_enabled = 1;
} else {
priv->ap_11ac_enabled = 0;
}
return;
}
/* This function updates 11ac related parameters from IE
* and sets them into bss_config structure.
*/
void mwifiex_set_tpc_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *tpc_ie;
tpc_ie = cfg80211_find_ie(WLAN_EID_TPC_REQUEST, params->beacon.tail,
params->beacon.tail_len);
if (tpc_ie)
bss_cfg->power_constraint = *(tpc_ie + 2);
else
bss_cfg->power_constraint = 0;
}
/* Enable VHT only when cfg80211_ap_settings has VHT IE.
* Otherwise disable VHT.
*/
void mwifiex_set_vht_width(struct mwifiex_private *priv,
enum nl80211_chan_width width,
bool ap_11ac_enable)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_11ac_vht_cfg vht_cfg;
vht_cfg.band_config = VHT_CFG_5GHZ;
vht_cfg.cap_info = adapter->hw_dot_11ac_dev_cap;
if (!ap_11ac_enable) {
vht_cfg.mcs_tx_set = DISABLE_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DISABLE_VHT_MCS_SET;
} else {
vht_cfg.mcs_tx_set = DEFAULT_VHT_MCS_SET;
vht_cfg.mcs_rx_set = DEFAULT_VHT_MCS_SET;
}
vht_cfg.misc_config = VHT_CAP_UAP_ONLY;
if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
vht_cfg.misc_config |= VHT_BW_80_160_80P80;
mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
return;
}
/* This function finds supported rates IE from beacon parameter and sets
* these rates into bss_config structure.
*/
void
mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
struct ieee_types_header *rate_ie;
int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u8 *var_pos = params->beacon.head + var_offset;
int len = params->beacon.head_len - var_offset;
u8 rate_len = 0;
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
if (rate_ie)
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
return;
}
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
* be get updated to valid ones at later stage.
*/
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
{
config->bcast_ssid_ctl = 0x7F;
config->radio_ctl = 0x7F;
config->dtim_period = 0x7F;
config->beacon_period = 0x7FFF;
config->auth_mode = 0x7F;
config->rts_threshold = 0x7FFF;
config->frag_threshold = 0x7FFF;
config->retry_limit = 0x7F;
config->qos_info = 0xFF;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WPA/WPA2 security.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_pwk_cipher *pwk_cipher;
struct host_cmd_tlv_gwk_cipher *gwk_cipher;
struct host_cmd_tlv_passphrase *passphrase;
struct host_cmd_tlv_akmp *tlv_akmp;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
u16 cmd_size = *param_size;
u8 *tlv = *tlv_buf;
tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
sizeof(struct mwifiex_ie_types_header));
tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
cmd_size += sizeof(struct host_cmd_tlv_akmp);
tlv += sizeof(struct host_cmd_tlv_akmp);
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
}
if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
gwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
sizeof(struct mwifiex_ie_types_header));
gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
}
if (bss_cfg->wpa_cfg.length) {
passphrase = (struct host_cmd_tlv_passphrase *)tlv;
passphrase->header.type =
cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
bss_cfg->wpa_cfg.length);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function parses WMM related parameters from cfg80211_ap_settings
* structure and updates bss_config structure.
*/
void
mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params)
{
const u8 *vendor_ie;
const u8 *wmm_ie;
u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
params->beacon.tail,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
} else {
memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui));
bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE;
bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION;
priv->wmm_enabled = 0;
}
bss_cfg->qos_info = 0x00;
return;
}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WEP encryption.
* These TLVs are appended to command buffer.
*/
static void
mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_wep_key *wep_key;
u16 cmd_size = *param_size;
int i;
u8 *tlv = *tlv_buf;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
for (i = 0; i < NUM_WEP_KEYS; i++) {
if (bss_cfg->wep_cfg[i].length &&
(bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
wep_key = (struct host_cmd_tlv_wep_key *)tlv;
wep_key->header.type =
cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
wep_key->header.len =
cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
bss_cfg->wep_cfg[i].length);
cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
}
}
*param_size = cmd_size;
*tlv_buf = tlv;
return;
}
/* This function enable 11D if userspace set the country IE.
*/
void mwifiex_config_uap_11d(struct mwifiex_private *priv,
struct cfg80211_beacon_data *beacon_data)
{
enum state_11d_t state_11d;
const u8 *country_ie;
country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_data->tail,
beacon_data->tail_len);
if (country_ie) {
/* Send cmd to FW to enable 11D function */
state_11d = ENABLE_11D;
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
HostCmd_ACT_GEN_SET, DOT11D_I,
&state_11d, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"11D: failed to enable 11D\n");
}
}
}
/* This function parses BSS related parameters from structure
* and prepares TLVs. These TLVs are appended to command buffer.
*/
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
struct host_cmd_tlv_bcast_ssid *bcast_ssid;
struct host_cmd_tlv_channel_band *chan_band;
struct host_cmd_tlv_frag_threshold *frag_threshold;
struct host_cmd_tlv_rts_threshold *rts_threshold;
struct host_cmd_tlv_retry_limit *retry_limit;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
struct host_cmd_tlv_rates *tlv_rates;
struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
struct host_cmd_tlv_power_constraint *pwr_ct;
struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_ie_types_wmmcap *wmm_cap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
int i;
u16 cmd_size = *param_size;
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
tlv += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
bcast_ssid->header.len =
cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
}
if (bss_cfg->rates[0]) {
tlv_rates = (struct host_cmd_tlv_rates *)tlv;
tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
i++)
tlv_rates->rates[i] = bss_cfg->rates[i];
tlv_rates->header.len = cpu_to_le16(i);
cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
if (bss_cfg->channel &&
(((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG &&
bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
chan_band->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
sizeof(struct mwifiex_ie_types_header));
chan_band->band_config = bss_cfg->band_cfg;
chan_band->channel = bss_cfg->channel;
cmd_size += sizeof(struct host_cmd_tlv_channel_band);
tlv += sizeof(struct host_cmd_tlv_channel_band);
}
if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
beacon_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
beacon_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
sizeof(struct mwifiex_ie_types_header));
beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
tlv += sizeof(struct host_cmd_tlv_beacon_period);
}
if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
dtim_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
dtim_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
sizeof(struct mwifiex_ie_types_header));
dtim_period->period = bss_cfg->dtim_period;
cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
tlv += sizeof(struct host_cmd_tlv_dtim_period);
}
if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
rts_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
rts_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
sizeof(struct mwifiex_ie_types_header));
rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
(bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
frag_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
frag_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
sizeof(struct mwifiex_ie_types_header));
frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
retry_limit->header.type =
cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
retry_limit->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
sizeof(struct mwifiex_ie_types_header));
retry_limit->limit = (u8)bss_cfg->retry_limit;
cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
tlv += sizeof(struct host_cmd_tlv_retry_limit);
}
if ((bss_cfg->protocol & PROTOCOL_WPA) ||
(bss_cfg->protocol & PROTOCOL_WPA2) ||
(bss_cfg->protocol & PROTOCOL_EAP))
mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
else
mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
auth_type->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
sizeof(struct mwifiex_ie_types_header));
auth_type->auth_type = (u8)bss_cfg->auth_mode;
cmd_size += sizeof(struct host_cmd_tlv_auth_type);
tlv += sizeof(struct host_cmd_tlv_auth_type);
}
if (bss_cfg->protocol) {
encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
encrypt_protocol->header.type =
cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
encrypt_protocol->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
- sizeof(struct mwifiex_ie_types_header));
encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
}
if (bss_cfg->ht_cap.cap_info) {
htcap = (struct mwifiex_ie_types_htcap *)tlv;
htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
htcap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
htcap->ht_cap.ampdu_params_info =
bss_cfg->ht_cap.ampdu_params_info;
memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
sizeof(struct ieee80211_mcs_info));
htcap->ht_cap.extended_ht_cap_info =
bss_cfg->ht_cap.extended_ht_cap_info;
htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
htcap->ht_cap.antenna_selection_info =
bss_cfg->ht_cap.antenna_selection_info;
cmd_size += sizeof(struct mwifiex_ie_types_htcap);
tlv += sizeof(struct mwifiex_ie_types_htcap);
}
if (bss_cfg->wmm_info.qos_info != 0xFF) {
wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv;
wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC);
wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info));
memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info,
sizeof(wmm_cap->wmm_info));
cmd_size += sizeof(struct mwifiex_ie_types_wmmcap);
tlv += sizeof(struct mwifiex_ie_types_wmmcap);
}
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
cmd_size += sizeof(*ao_timer);
tlv += sizeof(*ao_timer);
}
if (bss_cfg->power_constraint) {
pwr_ct = (void *)tlv;
pwr_ct->header.type = cpu_to_le16(TLV_TYPE_PWR_CONSTRAINT);
pwr_ct->header.len = cpu_to_le16(sizeof(u8));
pwr_ct->constraint = bss_cfg->power_constraint;
cmd_size += sizeof(*pwr_ct);
tlv += sizeof(*pwr_ct);
}
if (bss_cfg->ps_sta_ao_timer) {
ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ps_ao_timer->header.type =
cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
sizeof(struct mwifiex_ie_types_header));
ps_ao_timer->sta_ao_timer =
cpu_to_le32(bss_cfg->ps_sta_ao_timer);
cmd_size += sizeof(*ps_ao_timer);
tlv += sizeof(*ps_ao_timer);
}
*param_size = cmd_size;
return 0;
}
/* This function parses custom IEs from IE list and prepares command buffer */
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
struct mwifiex_ie_list *ap_ie = cmd_buf;
struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
if (!ap_ie || !ap_ie->len)
return -1;
*ie_size += le16_to_cpu(ap_ie->len) +
sizeof(struct mwifiex_ie_types_header);
tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
tlv_ie->len = ap_ie->len;
tlv += sizeof(struct mwifiex_ie_types_header);
memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
return 0;
}
/* Parse AP config structure and prepare TLV based command structure
* to be sent to FW for uAP configuration
*/
static int
mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
u32 type, void *cmd_buf)
{
u8 *tlv;
u16 cmd_size, param_size, ie_size;
struct host_cmd_ds_sys_config *sys_cfg;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
sys_cfg->action = cpu_to_le16(cmd_action);
tlv = sys_cfg->tlv;
switch (type) {
case UAP_BSS_PARAMS_I:
param_size = cmd_size;
if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, ¶m_size))
return -1;
cmd->size = cpu_to_le16(param_size);
break;
case UAP_CUSTOM_IE_I:
ie_size = cmd_size;
if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
return -1;
cmd->size = cpu_to_le16(ie_size);
break;
default:
return -1;
}
return 0;
}
/* This function prepares AP specific deauth command with mac supplied in
* function parameter.
*/
static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u8 *mac)
{
struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
memcpy(sta_deauth->mac, mac, ETH_ALEN);
sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
S_DS_GEN);
return 0;
}
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
* routines based upon the command number.
*/
int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
u16 cmd_action, u32 type,
void *data_buf, void *cmd_buf)
{
struct host_cmd_ds_command *cmd = cmd_buf;
switch (cmd_no) {
case HostCmd_CMD_UAP_SYS_CONFIG:
if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
cmd->command = cpu_to_le16(cmd_no);
cmd->size = cpu_to_le16(S_DS_GEN);
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
return -1;
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
data_buf))
return -1;
break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
return 0;
}
void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef)
{
u8 config_bands = 0, old_bands = priv->adapter->config_bands;
priv->bss_chandef = chandef;
bss_cfg->channel = ieee80211_frequency_to_channel(
chandef.chan->center_freq);
/* Set appropriate bands */
if (chandef.chan->band == NL80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
config_bands = BAND_B | BAND_G;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_AN;
if (chandef.width > NL80211_CHAN_WIDTH_40)
config_bands |= BAND_AAC;
}
switch (chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
break;
case NL80211_CHAN_WIDTH_40:
if (chandef.center_freq1 < chandef.chan->center_freq)
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW;
else
bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE;
break;
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
bss_cfg->band_cfg |=
mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4;
break;
default:
mwifiex_dbg(priv->adapter,
WARN, "Unknown channel width: %d\n",
chandef.width);
break;
}
priv->adapter->config_bands = config_bands;
if (old_bands != config_bands) {
mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
mwifiex_dnld_txpwr_table(priv);
}
}
int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg)
{
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to set AP configuration\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to start the BSS\n");
return -1;
}
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, true))
return -1;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_999_1 |
crossvul-cpp_data_bad_4075_0 | //
// Copyright (C) 1993-1996 Id Software, Inc.
// Copyright (C) 2016-2017 Alexey Khokholov (Nuke.YKT)
// Copyright (C) 2017 Alexandre-Xavier Labont�-Lamoureux
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// DESCRIPTION:
// Main loop menu stuff.
// Default Config File.
// PCX Screenshots.
//
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include "doomdef.h"
#include "z_zone.h"
#include "w_wad.h"
#include "i_system.h"
#include "v_video.h"
#include "hu_stuff.h"
// State.
#include "doomstat.h"
// Data.
#include "dstrings.h"
#include "m_misc.h"
int myargc;
char** myargv;
//
// M_DrawText
// Returns the final X coordinate
// HU_Init must have been called to init the font
//
extern patch_t* hu_font[HU_FONTSIZE];
int
M_DrawText
( int x,
int y,
boolean direct,
char* string )
{
int c;
int w;
while (*string)
{
c = toupper(*string) - HU_FONTSTART;
string++;
if (c < 0 || c> HU_FONTSIZE)
{
x += 4;
continue;
}
w = SHORT (hu_font[c]->width);
if (x+w > SCREENWIDTH)
break;
if (direct)
V_DrawPatchDirect(x, y, 0, hu_font[c]);
else
V_DrawPatch(x, y, 0, hu_font[c]);
x+=w;
}
return x;
}
//
// M_CheckParm
// Checks for the given parameter
// in the program's command line arguments.
// Returns the argument number (1 to argc-1)
// or 0 if not present
int M_CheckParm (char *check)
{
int i;
for (i = 1;i<myargc;i++)
{
if ( !strcasecmp(check, myargv[i]) )
return i;
}
return 0;
}
//
// M_Random
// Returns a 0-255 number
//
unsigned char rndtable[256] = {
0, 8, 109, 220, 222, 241, 149, 107, 75, 248, 254, 140, 16, 66 ,
74, 21, 211, 47, 80, 242, 154, 27, 205, 128, 161, 89, 77, 36 ,
95, 110, 85, 48, 212, 140, 211, 249, 22, 79, 200, 50, 28, 188 ,
52, 140, 202, 120, 68, 145, 62, 70, 184, 190, 91, 197, 152, 224 ,
149, 104, 25, 178, 252, 182, 202, 182, 141, 197, 4, 81, 181, 242 ,
145, 42, 39, 227, 156, 198, 225, 193, 219, 93, 122, 175, 249, 0 ,
175, 143, 70, 239, 46, 246, 163, 53, 163, 109, 168, 135, 2, 235 ,
25, 92, 20, 145, 138, 77, 69, 166, 78, 176, 173, 212, 166, 113 ,
94, 161, 41, 50, 239, 49, 111, 164, 70, 60, 2, 37, 171, 75 ,
136, 156, 11, 56, 42, 146, 138, 229, 73, 146, 77, 61, 98, 196 ,
135, 106, 63, 197, 195, 86, 96, 203, 113, 101, 170, 247, 181, 113 ,
80, 250, 108, 7, 255, 237, 129, 226, 79, 107, 112, 166, 103, 241 ,
24, 223, 239, 120, 198, 58, 60, 82, 128, 3, 184, 66, 143, 224 ,
145, 224, 81, 206, 163, 45, 63, 90, 168, 114, 59, 33, 159, 95 ,
28, 139, 123, 98, 125, 196, 15, 70, 194, 253, 54, 14, 109, 226 ,
71, 17, 161, 93, 186, 87, 244, 138, 20, 52, 123, 251, 26, 36 ,
17, 46, 52, 231, 232, 76, 31, 221, 84, 37, 216, 165, 212, 106 ,
197, 242, 98, 43, 39, 175, 254, 145, 190, 84, 118, 222, 187, 136 ,
120, 163, 236, 249
};
int rndindex = 0;
int prndindex = 0;
// Which one is deterministic?
int P_Random (void)
{
prndindex = (prndindex+1)&0xff;
return rndtable[prndindex];
}
int M_Random (void)
{
rndindex = (rndindex+1)&0xff;
return rndtable[rndindex];
}
void M_ClearRandom (void)
{
rndindex = prndindex = 0;
}
void M_ClearBox (fixed_t *box)
{
box[BOXTOP] = box[BOXRIGHT] = MININT;
box[BOXBOTTOM] = box[BOXLEFT] = MAXINT;
}
void
M_AddToBox
( fixed_t* box,
fixed_t x,
fixed_t y )
{
if (x<box[BOXLEFT])
box[BOXLEFT] = x;
else if (x>box[BOXRIGHT])
box[BOXRIGHT] = x;
if (y<box[BOXBOTTOM])
box[BOXBOTTOM] = y;
else if (y>box[BOXTOP])
box[BOXTOP] = y;
}
//
// M_WriteFile
//
#ifndef O_BINARY
#define O_BINARY 0
#endif
boolean
M_WriteFile
( char const* name,
void* source,
int length )
{
int handle;
int count;
handle = open ( name, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0666);
if (handle == -1)
return false;
count = write (handle, source, length);
close (handle);
if (count < length)
return false;
return true;
}
//
// M_ReadFile
//
int
M_ReadFile
( char const* name,
byte** buffer )
{
int handle, count, length;
struct stat fileinfo;
byte *buf;
handle = open (name, O_RDONLY | O_BINARY, 0666);
if (handle == -1)
I_Error ("Couldn't read file %s", name);
if (fstat (handle,&fileinfo) == -1)
I_Error ("Couldn't read file %s", name);
length = fileinfo.st_size;
buf = Z_Malloc (length, PU_STATIC, NULL);
count = read (handle, buf, length);
close (handle);
if (count < length)
I_Error ("Couldn't read file %s", name);
*buffer = buf;
return length;
}
//
// DEFAULTS
//
int usemouse;
int usejoystick;
extern int key_right;
extern int key_left;
extern int key_up;
extern int key_down;
extern int key_strafeleft;
extern int key_straferight;
extern int key_fire;
extern int key_use;
extern int key_strafe;
extern int key_speed;
extern int mousebfire;
extern int mousebstrafe;
extern int mousebforward;
extern int joybfire;
extern int joybstrafe;
extern int joybuse;
extern int joybspeed;
extern int viewwidth;
extern int viewheight;
extern int mouseSensitivity;
extern int showMessages;
extern int detailLevel;
extern int screenblocks;
extern int showMessages;
// machine-independent sound params
extern int numChannels;
extern int sfxVolume;
extern int musicVolume;
extern int snd_SBport, snd_SBirq, snd_SBdma;
extern int snd_Mport;
extern char* chat_macros[];
typedef struct
{
char* name;
int* location;
int defaultvalue;
int scantranslate; // PC scan code hack
int untranslated; // lousy hack
} default_t;
#define SC_UPARROW 0x48
#define SC_DOWNARROW 0x50
#define SC_LEFTARROW 0x4b
#define SC_RIGHTARROW 0x4d
#define SC_RCTRL 0x1d
#define SC_RALT 0x38
#define SC_RSHIFT 0x36
#define SC_SPACE 0x39
#define SC_COMMA 0x33
#define SC_PERIOD 0x34
#define SC_PAGEUP 0x49
#define SC_INSERT 0x52
#define SC_HOME 0x47
#define SC_PAGEDOWN 0x51
#define SC_DELETE 0x53
#define SC_END 0x4f
#define SC_ENTER 0x1c
#define SC_KEY_A 0x1e
#define SC_KEY_B 0x30
#define SC_KEY_C 0x2e
#define SC_KEY_D 0x20
#define SC_KEY_E 0x12
#define SC_KEY_F 0x21
#define SC_KEY_G 0x22
#define SC_KEY_H 0x23
#define SC_KEY_I 0x17
#define SC_KEY_J 0x24
#define SC_KEY_K 0x25
#define SC_KEY_L 0x26
#define SC_KEY_M 0x32
#define SC_KEY_N 0x31
#define SC_KEY_O 0x18
#define SC_KEY_P 0x19
#define SC_KEY_Q 0x10
#define SC_KEY_R 0x13
#define SC_KEY_S 0x1f
#define SC_KEY_T 0x14
#define SC_KEY_U 0x16
#define SC_KEY_V 0x2f
#define SC_KEY_W 0x11
#define SC_KEY_X 0x2d
#define SC_KEY_Y 0x15
#define SC_KEY_Z 0x2c
#define SC_BACKSPACE 0x0e
default_t defaults[] =
{
{"mouse_sensitivity",&mouseSensitivity, 5},
{"sfx_volume",&sfxVolume, 8},
{"music_volume",&musicVolume, 8},
{"show_messages",&showMessages, 1},
{"key_right",&key_right, SC_RIGHTARROW, 1},
{"key_left",&key_left, SC_LEFTARROW, 1},
{"key_up",&key_up, SC_UPARROW, 1},
{"key_down",&key_down, SC_DOWNARROW, 1},
{"key_strafeleft",&key_strafeleft, SC_COMMA, 1},
{"key_straferight",&key_straferight, SC_PERIOD, 1},
{"key_fire",&key_fire, SC_RCTRL, 1},
{"key_use",&key_use, SC_SPACE, 1},
{"key_strafe",&key_strafe, SC_RALT, 1},
{"key_speed",&key_speed, SC_RSHIFT, 1},
{"use_mouse",&usemouse, 1},
{"mouseb_fire",&mousebfire,0},
{"mouseb_strafe",&mousebstrafe,1},
{"mouseb_forward",&mousebforward,2},
{"use_joystick",&usejoystick, 0},
{"joyb_fire",&joybfire,0},
{"joyb_strafe",&joybstrafe,1},
{"joyb_use",&joybuse,3},
{"joyb_speed",&joybspeed,2},
{"screenblocks",&screenblocks, 9},
{"detaillevel",&detailLevel, 0},
{"snd_channels",&numChannels, 3},
{"snd_musicdevice",&snd_DesiredMusicDevice, 0},
{"snd_sfxdevice",&snd_DesiredSfxDevice, 0},
{"snd_sbport",&snd_SBport, 0x220},
{"snd_sbirq",&snd_SBirq, 5},
{"snd_sbdma",&snd_SBdma, 1},
{"snd_mport",&snd_Mport, 0x330},
{"usegamma",&usegamma, 0},
{"chatmacro0", (int *) &chat_macros[0], (int) HUSTR_CHATMACRO0 },
{"chatmacro1", (int *) &chat_macros[1], (int) HUSTR_CHATMACRO1 },
{"chatmacro2", (int *) &chat_macros[2], (int) HUSTR_CHATMACRO2 },
{"chatmacro3", (int *) &chat_macros[3], (int) HUSTR_CHATMACRO3 },
{"chatmacro4", (int *) &chat_macros[4], (int) HUSTR_CHATMACRO4 },
{"chatmacro5", (int *) &chat_macros[5], (int) HUSTR_CHATMACRO5 },
{"chatmacro6", (int *) &chat_macros[6], (int) HUSTR_CHATMACRO6 },
{"chatmacro7", (int *) &chat_macros[7], (int) HUSTR_CHATMACRO7 },
{"chatmacro8", (int *) &chat_macros[8], (int) HUSTR_CHATMACRO8 },
{"chatmacro9", (int *) &chat_macros[9], (int) HUSTR_CHATMACRO9 }
};
int numdefaults;
char* defaultfile;
//
// M_SaveDefaults
//
void M_SaveDefaults (void)
{
int i;
int v;
FILE* f;
f = fopen (defaultfile, "w");
if (!f)
return; // can't write the file, but don't complain
for (i=0 ; i<numdefaults ; i++)
{
if (defaults[i].scantranslate)
defaults[i].location = &defaults[i].untranslated;
if (defaults[i].defaultvalue > -0xfff
&& defaults[i].defaultvalue < 0xfff)
{
v = *defaults[i].location;
fprintf (f,"%s\t\t%i\n",defaults[i].name,v);
} else {
fprintf (f,"%s\t\t\"%s\"\n",defaults[i].name,
* (char **) (defaults[i].location));
}
}
fclose (f);
}
//
// M_LoadDefaults
//
extern byte scantokey[128];
void M_LoadDefaults (void)
{
int i;
int len;
FILE* f;
char def[80];
char strparm[100];
char* newstring;
int parm;
boolean isstring;
// set everything to base values
numdefaults = sizeof(defaults)/sizeof(defaults[0]);
for (i=0 ; i<numdefaults ; i++)
*defaults[i].location = defaults[i].defaultvalue;
// check for a custom default file
i = M_CheckParm ("-config");
if (i && i<myargc-1)
{
defaultfile = myargv[i+1];
printf (" default file: %s\n",defaultfile);
}
else
defaultfile = basedefault;
// read the file in, overriding any set defaults
f = fopen (defaultfile, "r");
if (f)
{
while (!feof(f))
{
isstring = false;
if (fscanf (f, "%79s %[^\n]\n", def, strparm) == 2)
{
if (strparm[0] == '"')
{
// get a string default
isstring = true;
len = strlen(strparm);
newstring = (char *) malloc(len);
strparm[len-1] = 0;
strcpy(newstring, strparm+1);
}
else if (strparm[0] == '0' && strparm[1] == 'x')
sscanf(strparm+2, "%x", &parm);
else
sscanf(strparm, "%i", &parm);
for (i=0 ; i<numdefaults ; i++)
if (!strcmp(def, defaults[i].name))
{
if (!isstring)
*defaults[i].location = parm;
else
*defaults[i].location =
(int) newstring;
break;
}
}
}
fclose (f);
}
for (i = 0; i < numdefaults; i++)
{
if (defaults[i].scantranslate)
{
parm = *defaults[i].location;
defaults[i].untranslated = parm;
*defaults[i].location = scantokey[parm];
}
}
}
//
// SCREEN SHOTS
//
typedef struct
{
char manufacturer;
char version;
char encoding;
char bits_per_pixel;
unsigned short xmin;
unsigned short ymin;
unsigned short xmax;
unsigned short ymax;
unsigned short hres;
unsigned short vres;
unsigned char palette[48];
char reserved;
char color_planes;
unsigned short bytes_per_line;
unsigned short palette_type;
char filler[58];
unsigned char data; // unbounded
} pcx_t;
//
// WritePCXfile
//
void
WritePCXfile
( char* filename,
byte* data,
int width,
int height,
byte* palette )
{
int i;
int length;
pcx_t* pcx;
byte* pack;
pcx = Z_Malloc (width*height*2+1000, PU_STATIC, NULL);
pcx->manufacturer = 0x0a; // PCX id
pcx->version = 5; // 256 color
pcx->encoding = 1; // uncompressed
pcx->bits_per_pixel = 8; // 256 color
pcx->xmin = 0;
pcx->ymin = 0;
pcx->xmax = SHORT(width-1);
pcx->ymax = SHORT(height-1);
pcx->hres = SHORT(width);
pcx->vres = SHORT(height);
memset (pcx->palette,0,sizeof(pcx->palette));
pcx->color_planes = 1; // chunky image
pcx->bytes_per_line = SHORT(width);
pcx->palette_type = SHORT(2); // not a grey scale
memset (pcx->filler,0,sizeof(pcx->filler));
// pack the image
pack = &pcx->data;
for (i=0 ; i<width*height ; i++)
{
if ( (*data & 0xc0) != 0xc0)
*pack++ = *data++;
else
{
*pack++ = 0xc1;
*pack++ = *data++;
}
}
// write the palette
*pack++ = 0x0c; // palette ID byte
for (i=0 ; i<768 ; i++)
*pack++ = *palette++;
// write output file
length = pack - (byte *)pcx;
M_WriteFile (filename, pcx, length);
Z_Free (pcx);
}
//
// M_ScreenShot
//
void M_ScreenShot (void)
{
int i;
byte* linear;
char lbmname[12];
// munge planar buffer to linear
linear = screens[2];
I_ReadScreen (linear);
// find a file name to save it to
strcpy(lbmname,"DOOM00.pcx");
for (i=0 ; i<=99 ; i++)
{
lbmname[4] = i/10 + '0';
lbmname[5] = i%10 + '0';
if (access(lbmname,0) == -1)
break; // file doesn't exist
}
if (i==100)
I_Error ("M_ScreenShot: Couldn't create a PCX");
// save the pcx file
WritePCXfile (lbmname, linear,
SCREENWIDTH, SCREENHEIGHT,
W_CacheLumpName ("PLAYPAL",PU_CACHE));
players[consoleplayer].message = "screen shot";
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4075_0 |
crossvul-cpp_data_good_4489_0 | /*
* NXP Wireless LAN device driver: association and ad-hoc start/join
*
* Copyright 2011-2020 NXP
*
* This software file (the "File") is distributed by NXP
* under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11ac.h"
#define CAPINFO_MASK (~(BIT(15) | BIT(14) | BIT(12) | BIT(11) | BIT(9)))
/*
* Append a generic IE as a pass through TLV to a TLV buffer.
*
* This function is called from the network join command preparation routine.
*
* If the IE buffer has been setup by the application, this routine appends
* the buffer as a pass through TLV type to the request.
*/
static int
mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
{
int ret_len = 0;
struct mwifiex_ie_types_header ie_header;
/* Null Checks */
if (!buffer)
return 0;
if (!(*buffer))
return 0;
/*
* If there is a generic ie buffer setup, append it to the return
* parameter buffer pointer.
*/
if (priv->gen_ie_buf_len) {
mwifiex_dbg(priv->adapter, INFO,
"info: %s: append generic ie len %d to %p\n",
__func__, priv->gen_ie_buf_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
ie_header.len = cpu_to_le16(priv->gen_ie_buf_len);
memcpy(*buffer, &ie_header, sizeof(ie_header));
/* Increment the return size and the return buffer pointer
param */
*buffer += sizeof(ie_header);
ret_len += sizeof(ie_header);
/* Copy the generic IE buffer to the output buffer, advance
pointer */
memcpy(*buffer, priv->gen_ie_buf, priv->gen_ie_buf_len);
/* Increment the return size and the return buffer pointer
param */
*buffer += priv->gen_ie_buf_len;
ret_len += priv->gen_ie_buf_len;
/* Reset the generic IE buffer */
priv->gen_ie_buf_len = 0;
}
/* return the length appended to the buffer */
return ret_len;
}
/*
* Append TSF tracking info from the scan table for the target AP.
*
* This function is called from the network join command preparation routine.
*
* The TSF table TSF sent to the firmware contains two TSF values:
* - The TSF of the target AP from its previous beacon/probe response
* - The TSF timestamp of our local MAC at the time we observed the
* beacon/probe response.
*
* The firmware uses the timestamp values to set an initial TSF value
* in the MAC for the new association after a reassociation attempt.
*/
static int
mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
struct mwifiex_bssdescriptor *bss_desc)
{
struct mwifiex_ie_types_tsf_timestamp tsf_tlv;
__le64 tsf_val;
/* Null Checks */
if (buffer == NULL)
return 0;
if (*buffer == NULL)
return 0;
memset(&tsf_tlv, 0x00, sizeof(struct mwifiex_ie_types_tsf_timestamp));
tsf_tlv.header.type = cpu_to_le16(TLV_TYPE_TSFTIMESTAMP);
tsf_tlv.header.len = cpu_to_le16(2 * sizeof(tsf_val));
memcpy(*buffer, &tsf_tlv, sizeof(tsf_tlv.header));
*buffer += sizeof(tsf_tlv.header);
/* TSF at the time when beacon/probe_response was received */
tsf_val = cpu_to_le64(bss_desc->fw_tsf);
memcpy(*buffer, &tsf_val, sizeof(tsf_val));
*buffer += sizeof(tsf_val);
tsf_val = cpu_to_le64(bss_desc->timestamp);
mwifiex_dbg(priv->adapter, INFO,
"info: %s: TSF offset calc: %016llx - %016llx\n",
__func__, bss_desc->timestamp, bss_desc->fw_tsf);
memcpy(*buffer, &tsf_val, sizeof(tsf_val));
*buffer += sizeof(tsf_val);
return sizeof(tsf_tlv.header) + (2 * sizeof(tsf_val));
}
/*
* This function finds out the common rates between rate1 and rate2.
*
* It will fill common rates in rate1 as output if found.
*
* NOTE: Setting the MSB of the basic rates needs to be taken
* care of, either before or after calling this function.
*/
static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
u32 rate1_size, u8 *rate2, u32 rate2_size)
{
int ret;
u8 *ptr = rate1, *tmp;
u32 i, j;
tmp = kmemdup(rate1, rate1_size, GFP_KERNEL);
if (!tmp) {
mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n");
return -ENOMEM;
}
memset(rate1, 0, rate1_size);
for (i = 0; i < rate2_size && rate2[i]; i++) {
for (j = 0; j < rate1_size && tmp[j]; j++) {
/* Check common rate, excluding the bit for
basic rate */
if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) {
*rate1++ = tmp[j];
break;
}
}
}
mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n",
priv->data_rate);
if (!priv->is_data_rate_auto) {
while (*ptr) {
if ((*ptr & 0x7f) == priv->data_rate) {
ret = 0;
goto done;
}
ptr++;
}
mwifiex_dbg(priv->adapter, ERROR,
"previously set fixed data rate %#x\t"
"is not compatible with the network\n",
priv->data_rate);
ret = -1;
goto done;
}
ret = 0;
done:
kfree(tmp);
return ret;
}
/*
* This function creates the intersection of the rates supported by a
* target BSS and our adapter settings for use in an assoc/join command.
*/
static int
mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 *out_rates, u32 *out_rates_size)
{
u8 card_rates[MWIFIEX_SUPPORTED_RATES];
u32 card_rates_size;
/* Copy AP supported rates */
memcpy(out_rates, bss_desc->supported_rates, MWIFIEX_SUPPORTED_RATES);
/* Get the STA supported rates */
card_rates_size = mwifiex_get_active_data_rates(priv, card_rates);
/* Get the common rates between AP and STA supported rates */
if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
card_rates, card_rates_size)) {
*out_rates_size = 0;
mwifiex_dbg(priv->adapter, ERROR,
"%s: cannot get common rates\n",
__func__);
return -1;
}
*out_rates_size =
min_t(size_t, strlen(out_rates), MWIFIEX_SUPPORTED_RATES);
return 0;
}
/*
* This function appends a WPS IE. It is called from the network join command
* preparation routine.
*
* If the IE buffer has been setup by the application, this routine appends
* the buffer as a WPS TLV type to the request.
*/
static int
mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
{
int retLen = 0;
struct mwifiex_ie_types_header ie_header;
if (!buffer || !*buffer)
return 0;
/*
* If there is a wps ie buffer setup, append it to the return
* parameter buffer pointer.
*/
if (priv->wps_ie_len) {
mwifiex_dbg(priv->adapter, CMD,
"cmd: append wps ie %d to %p\n",
priv->wps_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
ie_header.len = cpu_to_le16(priv->wps_ie_len);
memcpy(*buffer, &ie_header, sizeof(ie_header));
*buffer += sizeof(ie_header);
retLen += sizeof(ie_header);
memcpy(*buffer, priv->wps_ie, priv->wps_ie_len);
*buffer += priv->wps_ie_len;
retLen += priv->wps_ie_len;
}
kfree(priv->wps_ie);
priv->wps_ie_len = 0;
return retLen;
}
/*
* This function appends a WAPI IE.
*
* This function is called from the network join command preparation routine.
*
* If the IE buffer has been setup by the application, this routine appends
* the buffer as a WAPI TLV type to the request.
*/
static int
mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
{
int retLen = 0;
struct mwifiex_ie_types_header ie_header;
/* Null Checks */
if (buffer == NULL)
return 0;
if (*buffer == NULL)
return 0;
/*
* If there is a wapi ie buffer setup, append it to the return
* parameter buffer pointer.
*/
if (priv->wapi_ie_len) {
mwifiex_dbg(priv->adapter, CMD,
"cmd: append wapi ie %d to %p\n",
priv->wapi_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
ie_header.len = cpu_to_le16(priv->wapi_ie_len);
memcpy(*buffer, &ie_header, sizeof(ie_header));
/* Increment the return size and the return buffer pointer
param */
*buffer += sizeof(ie_header);
retLen += sizeof(ie_header);
/* Copy the wapi IE buffer to the output buffer, advance
pointer */
memcpy(*buffer, priv->wapi_ie, priv->wapi_ie_len);
/* Increment the return size and the return buffer pointer
param */
*buffer += priv->wapi_ie_len;
retLen += priv->wapi_ie_len;
}
/* return the length appended to the buffer */
return retLen;
}
/*
* This function appends rsn ie tlv for wpa/wpa2 security modes.
* It is called from the network join command preparation routine.
*/
static int mwifiex_append_rsn_ie_wpa_wpa2(struct mwifiex_private *priv,
u8 **buffer)
{
struct mwifiex_ie_types_rsn_param_set *rsn_ie_tlv;
int rsn_ie_len;
if (!buffer || !(*buffer))
return 0;
rsn_ie_tlv = (struct mwifiex_ie_types_rsn_param_set *) (*buffer);
rsn_ie_tlv->header.type = cpu_to_le16((u16) priv->wpa_ie[0]);
rsn_ie_tlv->header.type = cpu_to_le16(
le16_to_cpu(rsn_ie_tlv->header.type) & 0x00FF);
rsn_ie_tlv->header.len = cpu_to_le16((u16) priv->wpa_ie[1]);
rsn_ie_tlv->header.len = cpu_to_le16(le16_to_cpu(rsn_ie_tlv->header.len)
& 0x00FF);
if (le16_to_cpu(rsn_ie_tlv->header.len) <= (sizeof(priv->wpa_ie) - 2))
memcpy(rsn_ie_tlv->rsn_ie, &priv->wpa_ie[2],
le16_to_cpu(rsn_ie_tlv->header.len));
else
return -1;
rsn_ie_len = sizeof(rsn_ie_tlv->header) +
le16_to_cpu(rsn_ie_tlv->header.len);
*buffer += rsn_ie_len;
return rsn_ie_len;
}
/*
* This function prepares command for association.
*
* This sets the following parameters -
* - Peer MAC address
* - Listen interval
* - Beacon interval
* - Capability information
*
* ...and the following TLVs, as required -
* - SSID TLV
* - PHY TLV
* - SS TLV
* - Rates TLV
* - Authentication TLV
* - Channel TLV
* - WPA/WPA2 IE
* - 11n TLV
* - Vendor specific TLV
* - WMM TLV
* - WAPI IE
* - Generic IE
* - TSF TLV
*
* Preparation also includes -
* - Setting command ID and proper size
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
struct mwifiex_bssdescriptor *bss_desc)
{
struct host_cmd_ds_802_11_associate *assoc = &cmd->params.associate;
struct mwifiex_ie_types_ssid_param_set *ssid_tlv;
struct mwifiex_ie_types_phy_param_set *phy_tlv;
struct mwifiex_ie_types_ss_param_set *ss_tlv;
struct mwifiex_ie_types_rates_param_set *rates_tlv;
struct mwifiex_ie_types_auth_type *auth_tlv;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 rates_size;
u16 tmp_cap;
u8 *pos;
int rsn_ie_len = 0;
pos = (u8 *) assoc;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);
/* Save so we know which BSS Desc to use in the response handler */
priv->attempted_bss_desc = bss_desc;
memcpy(assoc->peer_sta_addr,
bss_desc->mac_address, sizeof(assoc->peer_sta_addr));
pos += sizeof(assoc->peer_sta_addr);
/* Set the listen interval */
assoc->listen_interval = cpu_to_le16(priv->listen_interval);
/* Set the beacon period */
assoc->beacon_period = cpu_to_le16(bss_desc->beacon_period);
pos += sizeof(assoc->cap_info_bitmap);
pos += sizeof(assoc->listen_interval);
pos += sizeof(assoc->beacon_period);
pos += sizeof(assoc->dtim_period);
ssid_tlv = (struct mwifiex_ie_types_ssid_param_set *) pos;
ssid_tlv->header.type = cpu_to_le16(WLAN_EID_SSID);
ssid_tlv->header.len = cpu_to_le16((u16) bss_desc->ssid.ssid_len);
memcpy(ssid_tlv->ssid, bss_desc->ssid.ssid,
le16_to_cpu(ssid_tlv->header.len));
pos += sizeof(ssid_tlv->header) + le16_to_cpu(ssid_tlv->header.len);
phy_tlv = (struct mwifiex_ie_types_phy_param_set *) pos;
phy_tlv->header.type = cpu_to_le16(WLAN_EID_DS_PARAMS);
phy_tlv->header.len = cpu_to_le16(sizeof(phy_tlv->fh_ds.ds_param_set));
memcpy(&phy_tlv->fh_ds.ds_param_set,
&bss_desc->phy_param_set.ds_param_set.current_chan,
sizeof(phy_tlv->fh_ds.ds_param_set));
pos += sizeof(phy_tlv->header) + le16_to_cpu(phy_tlv->header.len);
ss_tlv = (struct mwifiex_ie_types_ss_param_set *) pos;
ss_tlv->header.type = cpu_to_le16(WLAN_EID_CF_PARAMS);
ss_tlv->header.len = cpu_to_le16(sizeof(ss_tlv->cf_ibss.cf_param_set));
pos += sizeof(ss_tlv->header) + le16_to_cpu(ss_tlv->header.len);
/* Get the common rates supported between the driver and the BSS Desc */
if (mwifiex_setup_rates_from_bssdesc
(priv, bss_desc, rates, &rates_size))
return -1;
/* Save the data rates into Current BSS state structure */
priv->curr_bss_params.num_of_rates = rates_size;
memcpy(&priv->curr_bss_params.data_rates, rates, rates_size);
/* Setup the Rates TLV in the association command */
rates_tlv = (struct mwifiex_ie_types_rates_param_set *) pos;
rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
rates_tlv->header.len = cpu_to_le16((u16) rates_size);
memcpy(rates_tlv->rates, rates, rates_size);
pos += sizeof(rates_tlv->header) + rates_size;
mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n",
rates_size);
/* Add the Authentication type to be used for Auth frames */
auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
auth_tlv->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
auth_tlv->header.len = cpu_to_le16(sizeof(auth_tlv->auth_type));
if (priv->sec_info.wep_enabled)
auth_tlv->auth_type = cpu_to_le16(
(u16) priv->sec_info.authentication_mode);
else
auth_tlv->auth_type = cpu_to_le16(NL80211_AUTHTYPE_OPEN_SYSTEM);
pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len);
if (IS_SUPPORT_MULTI_BANDS(priv->adapter) &&
!(ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(!bss_desc->disable_11n) &&
(priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN) &&
(bss_desc->bcn_ht_cap)
)
) {
/* Append a channel TLV for the channel the attempted AP was
found on */
chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
chan_tlv->header.len =
cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
memset(chan_tlv->chan_scan_param, 0x00,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n",
chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n",
chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
}
if (!priv->wps.session_enable) {
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
if (rsn_ie_len == -1)
return -1;
}
if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(!bss_desc->disable_11n) &&
(priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN))
mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
priv->adapter->config_bands & BAND_AAC)
mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
/* Append vendor specific IE TLV */
mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_ASSOC, &pos);
mwifiex_wmm_process_association_req(priv, &pos, &bss_desc->wmm_ie,
bss_desc->bcn_ht_cap);
if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
mwifiex_cmd_append_wapi_ie(priv, &pos);
if (priv->wps.session_enable && priv->wps_ie_len)
mwifiex_cmd_append_wps_ie(priv, &pos);
mwifiex_cmd_append_generic_ie(priv, &pos);
mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);
mwifiex_11h_process_join(priv, &pos, bss_desc);
cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);
/* Set the Capability info at last */
tmp_cap = bss_desc->cap_info_bitmap;
if (priv->adapter->config_bands == BAND_B)
tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
tmp_cap &= CAPINFO_MASK;
mwifiex_dbg(priv->adapter, INFO,
"info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
tmp_cap, CAPINFO_MASK);
assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
return 0;
}
static const char *assoc_failure_reason_to_str(u16 cap_info)
{
switch (cap_info) {
case CONNECT_ERR_AUTH_ERR_STA_FAILURE:
return "CONNECT_ERR_AUTH_ERR_STA_FAILURE";
case CONNECT_ERR_AUTH_MSG_UNHANDLED:
return "CONNECT_ERR_AUTH_MSG_UNHANDLED";
case CONNECT_ERR_ASSOC_ERR_TIMEOUT:
return "CONNECT_ERR_ASSOC_ERR_TIMEOUT";
case CONNECT_ERR_ASSOC_ERR_AUTH_REFUSED:
return "CONNECT_ERR_ASSOC_ERR_AUTH_REFUSED";
case CONNECT_ERR_STA_FAILURE:
return "CONNECT_ERR_STA_FAILURE";
}
return "Unknown connect failure";
}
/*
* Association firmware command response handler
*
* The response buffer for the association command has the following
* memory layout.
*
* For cases where an association response was not received (indicated
* by the CapInfo and AId field):
*
* .------------------------------------------------------------.
* | Header(4 * sizeof(t_u16)): Standard command response hdr |
* .------------------------------------------------------------.
* | cap_info/Error Return(t_u16): |
* | 0xFFFF(-1): Internal error |
* | 0xFFFE(-2): Authentication unhandled message |
* | 0xFFFD(-3): Authentication refused |
* | 0xFFFC(-4): Timeout waiting for AP response |
* .------------------------------------------------------------.
* | status_code(t_u16): |
* | If cap_info is -1: |
* | An internal firmware failure prevented the |
* | command from being processed. The status_code |
* | will be set to 1. |
* | |
* | If cap_info is -2: |
* | An authentication frame was received but was |
* | not handled by the firmware. IEEE Status |
* | code for the failure is returned. |
* | |
* | If cap_info is -3: |
* | An authentication frame was received and the |
* | status_code is the IEEE Status reported in the |
* | response. |
* | |
* | If cap_info is -4: |
* | (1) Association response timeout |
* | (2) Authentication response timeout |
* .------------------------------------------------------------.
* | a_id(t_u16): 0xFFFF |
* .------------------------------------------------------------.
*
*
* For cases where an association response was received, the IEEE
* standard association response frame is returned:
*
* .------------------------------------------------------------.
* | Header(4 * sizeof(t_u16)): Standard command response hdr |
* .------------------------------------------------------------.
* | cap_info(t_u16): IEEE Capability |
* .------------------------------------------------------------.
* | status_code(t_u16): IEEE Status Code |
* .------------------------------------------------------------.
* | a_id(t_u16): IEEE Association ID |
* .------------------------------------------------------------.
* | IEEE IEs(variable): Any received IEs comprising the |
* | remaining portion of a received |
* | association response frame. |
* .------------------------------------------------------------.
*
* For simplistic handling, the status_code field can be used to determine
* an association success (0) or failure (non-zero).
*/
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
bool enable_data = true;
u16 cap_info, status_code, aid;
const u8 *ie_ptr;
struct ieee80211_ht_operation *assoc_resp_ht_oper;
if (!priv->attempted_bss_desc) {
mwifiex_dbg(priv->adapter, ERROR,
"ASSOC_RESP: failed, association terminated by host\n");
goto done;
}
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
status_code = le16_to_cpu(assoc_rsp->status_code);
aid = le16_to_cpu(assoc_rsp->a_id);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
dev_err(priv->adapter->dev,
"invalid AID value 0x%x; bits 15:14 not set\n",
aid);
aid &= ~(BIT(15) | BIT(14));
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
assoc_rsp->a_id = cpu_to_le16(aid);
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
mwifiex_dbg(priv->adapter, ERROR,
"ASSOC_RESP: failed,\t"
"status code=%d err=%#x a_id=%#x\n",
status_code, cap_info,
le16_to_cpu(assoc_rsp->a_id));
mwifiex_dbg(priv->adapter, ERROR, "assoc failure: reason %s\n",
assoc_failure_reason_to_str(cap_info));
if (cap_info == CONNECT_ERR_ASSOC_ERR_TIMEOUT) {
if (status_code == MWIFIEX_ASSOC_CMD_FAILURE_AUTH) {
ret = WLAN_STATUS_AUTH_TIMEOUT;
mwifiex_dbg(priv->adapter, ERROR,
"ASSOC_RESP: AUTH timeout\n");
} else {
ret = WLAN_STATUS_UNSPECIFIED_FAILURE;
mwifiex_dbg(priv->adapter, ERROR,
"ASSOC_RESP: UNSPECIFIED failure\n");
}
} else {
ret = status_code;
}
goto done;
}
/* Send a Media Connected event, according to the Spec */
priv->media_connected = true;
priv->adapter->ps_state = PS_STATE_AWAKE;
priv->adapter->pps_uapsd_mode = false;
priv->adapter->tx_lock_flag = false;
/* Set the attempted BSSID Index to current */
bss_desc = priv->attempted_bss_desc;
mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n",
bss_desc->ssid.ssid);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor,
bss_desc, sizeof(struct mwifiex_bssdescriptor));
/* Update curr_bss_params */
priv->curr_bss_params.bss_descriptor.channel
= bss_desc->phy_param_set.ds_param_set.current_chan;
priv->curr_bss_params.band = (u8) bss_desc->bss_band;
if (bss_desc->wmm_ie.vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC)
priv->curr_bss_params.wmm_enabled = true;
else
priv->curr_bss_params.wmm_enabled = false;
if ((priv->wmm_required || bss_desc->bcn_ht_cap) &&
priv->curr_bss_params.wmm_enabled)
priv->wmm_enabled = true;
else
priv->wmm_enabled = false;
priv->curr_bss_params.wmm_uapsd_enabled = false;
if (priv->wmm_enabled)
priv->curr_bss_params.wmm_uapsd_enabled
= ((bss_desc->wmm_ie.qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
/* Store the bandwidth information from assoc response */
ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer,
priv->assoc_rsp_size
- sizeof(struct ieee_types_assoc_rsp));
if (ie_ptr) {
assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr
+ sizeof(struct ieee_types_header));
priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param;
priv->ht_param_present = true;
} else {
priv->ht_param_present = false;
}
mwifiex_dbg(priv->adapter, INFO,
"info: ASSOC_RESP: curr_pkt_filter is %#x\n",
priv->curr_pkt_filter);
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->wpa_is_gtk_set = false;
if (priv->wmm_enabled) {
/* Don't re-enable carrier until we get the WMM_GET_STATUS
event */
enable_data = false;
} else {
/* Since WMM is not enabled, setup the queues with the
defaults */
mwifiex_wmm_setup_queue_priorities(priv, NULL);
mwifiex_wmm_setup_ac_downgrade(priv);
}
if (enable_data)
mwifiex_dbg(priv->adapter, INFO,
"info: post association, re-enabling data flow\n");
/* Reset SNR/NF/RSSI values */
priv->data_rssi_last = 0;
priv->data_nf_last = 0;
priv->data_rssi_avg = 0;
priv->data_nf_avg = 0;
priv->bcn_rssi_last = 0;
priv->bcn_nf_last = 0;
priv->bcn_rssi_avg = 0;
priv->bcn_nf_avg = 0;
priv->rxpd_rate = 0;
priv->rxpd_htinfo = 0;
mwifiex_save_curr_bcn(priv);
priv->adapter->dbg.num_cmd_assoc_success++;
mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
/* Add the ra_list here for infra mode as there will be only 1 ra
always */
mwifiex_ralist_add(priv,
priv->curr_bss_params.bss_descriptor.mac_address);
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->scan_block = true;
else
priv->port_open = true;
done:
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
if (ret)
adapter->cmd_wait_q.status = -1;
else
adapter->cmd_wait_q.status = 0;
}
return ret;
}
/*
* This function prepares command for ad-hoc start.
*
* Driver will fill up SSID, BSS mode, IBSS parameters, physical
* parameters, probe delay, and capability information. Firmware
* will fill up beacon period, basic rates and operational rates.
*
* In addition, the following TLVs are added -
* - Channel TLV
* - Vendor specific IE
* - WPA/WPA2 IE
* - HT Capabilities IE
* - HT Information IE
*
* Preparation also includes -
* - Setting command ID and proper size
* - Ensuring correct endian-ness
*/
int
mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
struct cfg80211_ssid *req_ssid)
{
int rsn_ie_len = 0;
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ad_hoc_start *adhoc_start =
&cmd->params.adhoc_start;
struct mwifiex_bssdescriptor *bss_desc;
u32 cmd_append_size = 0;
u32 i;
u16 tmp_cap;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
u8 radio_type;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_htinfo *ht_info;
u8 *pos = (u8 *) adhoc_start +
sizeof(struct host_cmd_ds_802_11_ad_hoc_start);
if (!adapter)
return -1;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_START);
bss_desc = &priv->curr_bss_params.bss_descriptor;
priv->attempted_bss_desc = bss_desc;
/*
* Fill in the parameters for 2 data structures:
* 1. struct host_cmd_ds_802_11_ad_hoc_start command
* 2. bss_desc
* Driver will fill up SSID, bss_mode,IBSS param, Physical Param,
* probe delay, and Cap info.
* Firmware will fill up beacon period, Basic rates
* and operational rates.
*/
memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
adhoc_start->ssid);
memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
bss_desc->ssid.ssid_len = req_ssid->ssid_len;
/* Set the BSS mode */
adhoc_start->bss_mode = HostCmd_BSS_MODE_IBSS;
bss_desc->bss_mode = NL80211_IFTYPE_ADHOC;
adhoc_start->beacon_period = cpu_to_le16(priv->beacon_period);
bss_desc->beacon_period = priv->beacon_period;
/* Set Physical param set */
/* Parameter IE Id */
#define DS_PARA_IE_ID 3
/* Parameter IE length */
#define DS_PARA_IE_LEN 1
adhoc_start->phy_param_set.ds_param_set.element_id = DS_PARA_IE_ID;
adhoc_start->phy_param_set.ds_param_set.len = DS_PARA_IE_LEN;
if (!mwifiex_get_cfp(priv, adapter->adhoc_start_band,
(u16) priv->adhoc_channel, 0)) {
struct mwifiex_chan_freq_power *cfp;
cfp = mwifiex_get_cfp(priv, adapter->adhoc_start_band,
FIRST_VALID_CHANNEL, 0);
if (cfp)
priv->adhoc_channel = (u8) cfp->channel;
}
if (!priv->adhoc_channel) {
mwifiex_dbg(adapter, ERROR,
"ADHOC_S_CMD: adhoc_channel cannot be 0\n");
return -1;
}
mwifiex_dbg(adapter, INFO,
"info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
priv->adhoc_channel);
priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
priv->curr_bss_params.band = adapter->adhoc_start_band;
bss_desc->channel = priv->adhoc_channel;
adhoc_start->phy_param_set.ds_param_set.current_chan =
priv->adhoc_channel;
memcpy(&bss_desc->phy_param_set, &adhoc_start->phy_param_set,
sizeof(union ieee_types_phy_param_set));
/* Set IBSS param set */
/* IBSS parameter IE Id */
#define IBSS_PARA_IE_ID 6
/* IBSS parameter IE length */
#define IBSS_PARA_IE_LEN 2
adhoc_start->ss_param_set.ibss_param_set.element_id = IBSS_PARA_IE_ID;
adhoc_start->ss_param_set.ibss_param_set.len = IBSS_PARA_IE_LEN;
adhoc_start->ss_param_set.ibss_param_set.atim_window
= cpu_to_le16(priv->atim_window);
memcpy(&bss_desc->ss_param_set, &adhoc_start->ss_param_set,
sizeof(union ieee_types_ss_param_set));
/* Set Capability info */
bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS;
tmp_cap = WLAN_CAPABILITY_IBSS;
/* Set up privacy in bss_desc */
if (priv->sec_info.encryption_mode) {
/* Ad-Hoc capability privacy on */
mwifiex_dbg(adapter, INFO,
"info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
tmp_cap |= WLAN_CAPABILITY_PRIVACY;
} else {
mwifiex_dbg(adapter, INFO,
"info: ADHOC_S_CMD: wep_status NOT set,\t"
"setting privacy to ACCEPT ALL\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, false)) {
mwifiex_dbg(adapter, ERROR,
"ADHOC_S_CMD: G Protection config failed\n");
return -1;
}
}
/* Find the last non zero */
for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
if (!adhoc_start->data_rate[i])
break;
priv->curr_bss_params.num_of_rates = i;
/* Copy the ad-hoc creating rates into Current BSS rate structure */
memcpy(&priv->curr_bss_params.data_rates,
&adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n",
adhoc_start->data_rate);
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
if (IS_SUPPORT_MULTI_BANDS(adapter)) {
/* Append a channel TLV */
chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
chan_tlv->header.len =
cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
memset(chan_tlv->chan_scan_param, 0x00,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(u8) priv->curr_bss_params.bss_descriptor.channel;
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n",
chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
if (adapter->adhoc_start_band & BAND_GN ||
adapter->adhoc_start_band & BAND_AN) {
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
chan_tlv->chan_scan_param[0].radio_type |=
(IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4);
else if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_BELOW)
chan_tlv->chan_scan_param[0].radio_type |=
(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n",
chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size +=
sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
}
/* Append vendor specific IE TLV */
cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
MWIFIEX_VSIE_MASK_ADHOC, &pos);
if (priv->sec_info.wpa_enabled) {
rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
if (rsn_ie_len == -1)
return -1;
cmd_append_size += rsn_ie_len;
}
if (adapter->adhoc_11n_enabled) {
/* Fill HT CAPABILITY */
ht_cap = (struct mwifiex_ie_types_htcap *) pos;
memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type = mwifiex_band_to_radio_type(
priv->adapter->config_bands);
mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_NONE) {
u16 tmp_ht_cap;
tmp_ht_cap = le16_to_cpu(ht_cap->ht_cap.cap_info);
tmp_ht_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
tmp_ht_cap &= ~IEEE80211_HT_CAP_SGI_40;
ht_cap->ht_cap.cap_info = cpu_to_le16(tmp_ht_cap);
}
pos += sizeof(struct mwifiex_ie_types_htcap);
cmd_append_size += sizeof(struct mwifiex_ie_types_htcap);
/* Fill HT INFORMATION */
ht_info = (struct mwifiex_ie_types_htinfo *) pos;
memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
ht_info->header.type = cpu_to_le16(WLAN_EID_HT_OPERATION);
ht_info->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_operation));
ht_info->ht_oper.primary_chan =
(u8) priv->curr_bss_params.bss_descriptor.channel;
if (adapter->sec_chan_offset) {
ht_info->ht_oper.ht_param = adapter->sec_chan_offset;
ht_info->ht_oper.ht_param |=
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
}
ht_info->ht_oper.operation_mode =
cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
ht_info->ht_oper.basic_set[0] = 0xff;
pos += sizeof(struct mwifiex_ie_types_htinfo);
cmd_append_size +=
sizeof(struct mwifiex_ie_types_htinfo);
}
cmd->size =
cpu_to_le16((u16)(sizeof(struct host_cmd_ds_802_11_ad_hoc_start)
+ S_DS_GEN + cmd_append_size));
if (adapter->adhoc_start_band == BAND_B)
tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
else
tmp_cap |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
adhoc_start->cap_info_bitmap = cpu_to_le16(tmp_cap);
return 0;
}
/*
* This function prepares command for ad-hoc join.
*
* Most of the parameters are set up by copying from the target BSS descriptor
* from the scan response.
*
* In addition, the following TLVs are added -
* - Channel TLV
* - Vendor specific IE
* - WPA/WPA2 IE
* - 11n IE
*
* Preparation also includes -
* - Setting command ID and proper size
* - Ensuring correct endian-ness
*/
int
mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
struct mwifiex_bssdescriptor *bss_desc)
{
int rsn_ie_len = 0;
struct host_cmd_ds_802_11_ad_hoc_join *adhoc_join =
&cmd->params.adhoc_join;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
u32 cmd_append_size = 0;
u16 tmp_cap;
u32 i, rates_size = 0;
u16 curr_pkt_filter;
u8 *pos =
(u8 *) adhoc_join +
sizeof(struct host_cmd_ds_802_11_ad_hoc_join);
/* Use G protection */
#define USE_G_PROTECTION 0x02
if (bss_desc->erp_flags & USE_G_PROTECTION) {
curr_pkt_filter =
priv->
curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&curr_pkt_filter, false)) {
mwifiex_dbg(priv->adapter, ERROR,
"ADHOC_J_CMD: G Protection config failed\n");
return -1;
}
}
priv->attempted_bss_desc = bss_desc;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_JOIN);
adhoc_join->bss_descriptor.bss_mode = HostCmd_BSS_MODE_IBSS;
adhoc_join->bss_descriptor.beacon_period
= cpu_to_le16(bss_desc->beacon_period);
memcpy(&adhoc_join->bss_descriptor.bssid,
&bss_desc->mac_address, ETH_ALEN);
memcpy(&adhoc_join->bss_descriptor.ssid,
&bss_desc->ssid.ssid, bss_desc->ssid.ssid_len);
memcpy(&adhoc_join->bss_descriptor.phy_param_set,
&bss_desc->phy_param_set,
sizeof(union ieee_types_phy_param_set));
memcpy(&adhoc_join->bss_descriptor.ss_param_set,
&bss_desc->ss_param_set, sizeof(union ieee_types_ss_param_set));
tmp_cap = bss_desc->cap_info_bitmap;
tmp_cap &= CAPINFO_MASK;
mwifiex_dbg(priv->adapter, INFO,
"info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
tmp_cap, CAPINFO_MASK);
/* Information on BSSID descriptor passed to FW */
mwifiex_dbg(priv->adapter, INFO,
"info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
adhoc_join->bss_descriptor.bssid,
adhoc_join->bss_descriptor.ssid);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
bss_desc->supported_rates[i]; i++)
;
rates_size = i;
/* Copy Data Rates from the Rates recorded in scan response */
memset(adhoc_join->bss_descriptor.data_rates, 0,
sizeof(adhoc_join->bss_descriptor.data_rates));
memcpy(adhoc_join->bss_descriptor.data_rates,
bss_desc->supported_rates, rates_size);
/* Copy the adhoc join rates into Current BSS state structure */
priv->curr_bss_params.num_of_rates = rates_size;
memcpy(&priv->curr_bss_params.data_rates, bss_desc->supported_rates,
rates_size);
/* Copy the channel information */
priv->curr_bss_params.bss_descriptor.channel = bss_desc->channel;
priv->curr_bss_params.band = (u8) bss_desc->bss_band;
if (priv->sec_info.wep_enabled || priv->sec_info.wpa_enabled)
tmp_cap |= WLAN_CAPABILITY_PRIVACY;
if (IS_SUPPORT_MULTI_BANDS(priv->adapter)) {
/* Append a channel TLV */
chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
chan_tlv->header.len =
cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
memset(chan_tlv->chan_scan_param, 0x00,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n",
chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n",
chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
}
if (priv->sec_info.wpa_enabled)
rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
if (rsn_ie_len == -1)
return -1;
cmd_append_size += rsn_ie_len;
if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
cmd_append_size += mwifiex_cmd_append_11n_tlv(priv,
bss_desc, &pos);
/* Append vendor specific IE TLV */
cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
MWIFIEX_VSIE_MASK_ADHOC, &pos);
cmd->size = cpu_to_le16
((u16) (sizeof(struct host_cmd_ds_802_11_ad_hoc_join)
+ S_DS_GEN + cmd_append_size));
adhoc_join->bss_descriptor.cap_info_bitmap = cpu_to_le16(tmp_cap);
return 0;
}
/*
* This function handles the command response of ad-hoc start and
* ad-hoc join.
*
* The function generates a device-connected event to notify
* the applications, in case of successful ad-hoc start/join, and
* saves the beacon buffer.
*/
int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ad_hoc_start_result *start_result =
&resp->params.start_result;
struct host_cmd_ds_802_11_ad_hoc_join_result *join_result =
&resp->params.join_result;
struct mwifiex_bssdescriptor *bss_desc;
u16 cmd = le16_to_cpu(resp->command);
u8 result;
if (!priv->attempted_bss_desc) {
mwifiex_dbg(priv->adapter, ERROR,
"ADHOC_RESP: failed, association terminated by host\n");
goto done;
}
if (cmd == HostCmd_CMD_802_11_AD_HOC_START)
result = start_result->result;
else
result = join_result->result;
bss_desc = priv->attempted_bss_desc;
/* Join result code 0 --> SUCCESS */
if (result) {
mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
if (priv->media_connected)
mwifiex_reset_connect_state(priv, result, true);
memset(&priv->curr_bss_params.bss_descriptor,
0x00, sizeof(struct mwifiex_bssdescriptor));
ret = -1;
goto done;
}
/* Send a Media Connected event, according to the Spec */
priv->media_connected = true;
if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n",
bss_desc->ssid.ssid);
/* Update the created network descriptor with the new BSSID */
memcpy(bss_desc->mac_address,
start_result->bssid, ETH_ALEN);
priv->adhoc_state = ADHOC_STARTED;
} else {
/*
* Now the join cmd should be successful.
* If BSSID has changed use SSID to compare instead of BSSID
*/
mwifiex_dbg(priv->adapter, INFO,
"info: ADHOC_J_RESP %s\n",
bss_desc->ssid.ssid);
/*
* Make a copy of current BSSID descriptor, only needed for
* join since the current descriptor is already being used
* for adhoc start
*/
memcpy(&priv->curr_bss_params.bss_descriptor,
bss_desc, sizeof(struct mwifiex_bssdescriptor));
priv->adhoc_state = ADHOC_JOINED;
}
mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n",
priv->adhoc_channel);
mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n",
priv->curr_bss_params.bss_descriptor.mac_address);
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
mwifiex_save_curr_bcn(priv);
done:
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
if (ret)
adapter->cmd_wait_q.status = -1;
else
adapter->cmd_wait_q.status = 0;
}
return ret;
}
/*
* This function associates to a specific BSS discovered in a scan.
*
* It clears any past association response stored for application
* retrieval and calls the command preparation routine to send the
* command to firmware.
*/
int mwifiex_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
/* Return error if the adapter is not STA role or table entry
* is not marked as infra.
*/
if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
(bss_desc->bss_mode != NL80211_IFTYPE_STATION))
return -1;
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
/* Clear any past association response stored for application
retrieval */
priv->assoc_rsp_size = 0;
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_ASSOCIATE,
HostCmd_ACT_GEN_SET, 0, bss_desc, true);
}
/*
* This function starts an ad-hoc network.
*
* It calls the command preparation routine to send the command to firmware.
*/
int
mwifiex_adhoc_start(struct mwifiex_private *priv,
struct cfg80211_ssid *adhoc_ssid)
{
mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n",
priv->adhoc_channel);
mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n",
priv->curr_bss_params.bss_descriptor.channel);
mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n",
priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_START,
HostCmd_ACT_GEN_SET, 0, adhoc_ssid, true);
}
/*
* This function joins an ad-hoc network found in a previous scan.
*
* It calls the command preparation routine to send the command to firmware,
* if already not connected to the requested SSID.
*/
int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
mwifiex_dbg(priv->adapter, INFO,
"info: adhoc join: curr_bss ssid =%s\n",
priv->curr_bss_params.bss_descriptor.ssid.ssid);
mwifiex_dbg(priv->adapter, INFO,
"info: adhoc join: curr_bss ssid_len =%u\n",
priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n",
bss_desc->ssid.ssid);
mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n",
bss_desc->ssid.ssid_len);
/* Check if the requested SSID is already joined */
if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
!mwifiex_ssid_cmp(&bss_desc->ssid,
&priv->curr_bss_params.bss_descriptor.ssid) &&
(priv->curr_bss_params.bss_descriptor.bss_mode ==
NL80211_IFTYPE_ADHOC)) {
mwifiex_dbg(priv->adapter, INFO,
"info: ADHOC_J_CMD: new ad-hoc SSID\t"
"is the same as current; not attempting to re-join\n");
return -1;
}
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
mwifiex_dbg(priv->adapter, INFO,
"info: curr_bss_params.channel = %d\n",
priv->curr_bss_params.bss_descriptor.channel);
mwifiex_dbg(priv->adapter, INFO,
"info: curr_bss_params.band = %c\n",
priv->curr_bss_params.band);
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
HostCmd_ACT_GEN_SET, 0, bss_desc, true);
}
/*
* This function deauthenticates/disconnects from infra network by sending
* deauthentication request.
*/
static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
{
u8 mac_address[ETH_ALEN];
int ret;
if (!mac || is_zero_ether_addr(mac))
memcpy(mac_address,
priv->curr_bss_params.bss_descriptor.mac_address,
ETH_ALEN);
else
memcpy(mac_address, mac, ETH_ALEN);
ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
HostCmd_ACT_GEN_SET, 0, mac_address, true);
return ret;
}
/*
* This function deauthenticates/disconnects from a BSS.
*
* In case of infra made, it sends deauthentication request, and
* in case of ad-hoc mode, a stop network request is sent to the firmware.
* In AP mode, a command to stop bss is sent to firmware.
*/
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
{
int ret = 0;
if (!priv->media_connected)
return 0;
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
ret = mwifiex_deauthenticate_infra(priv, mac);
if (ret)
cfg80211_disconnected(priv->netdev, 0, NULL, 0,
true, GFP_KERNEL);
break;
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true);
case NL80211_IFTYPE_AP:
return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true);
default:
break;
}
return ret;
}
/* This function deauthenticates/disconnects from all BSS. */
void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
int i;
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (priv)
mwifiex_deauthenticate(priv, NULL);
}
}
EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
/*
* This function converts band to radio type used in channel TLV.
*/
u8
mwifiex_band_to_radio_type(u8 band)
{
switch (band) {
case BAND_A:
case BAND_AN:
case BAND_A | BAND_AN:
case BAND_A | BAND_AN | BAND_AAC:
return HostCmd_SCAN_RADIO_TYPE_A;
case BAND_B:
case BAND_G:
case BAND_B | BAND_G:
default:
return HostCmd_SCAN_RADIO_TYPE_BG;
}
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4489_0 |
crossvul-cpp_data_bad_1706_0 | /*
* A bus for connecting virtio serial and console ports
*
* Copyright (C) 2009, 2010 Red Hat, Inc.
*
* Author(s):
* Amit Shah <amit.shah@redhat.com>
*
* Some earlier parts are:
* Copyright IBM, Corp. 2008
* authored by
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
*/
#include "qemu/iov.h"
#include "monitor/monitor.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-access.h"
static struct VirtIOSerialDevices {
QLIST_HEAD(, VirtIOSerial) devices;
} vserdevices;
static VirtIOSerialPort *find_port_by_id(VirtIOSerial *vser, uint32_t id)
{
VirtIOSerialPort *port;
if (id == VIRTIO_CONSOLE_BAD_ID) {
return NULL;
}
QTAILQ_FOREACH(port, &vser->ports, next) {
if (port->id == id)
return port;
}
return NULL;
}
static VirtIOSerialPort *find_port_by_vq(VirtIOSerial *vser, VirtQueue *vq)
{
VirtIOSerialPort *port;
QTAILQ_FOREACH(port, &vser->ports, next) {
if (port->ivq == vq || port->ovq == vq)
return port;
}
return NULL;
}
static VirtIOSerialPort *find_port_by_name(char *name)
{
VirtIOSerial *vser;
QLIST_FOREACH(vser, &vserdevices.devices, next) {
VirtIOSerialPort *port;
QTAILQ_FOREACH(port, &vser->ports, next) {
if (port->name && !strcmp(port->name, name)) {
return port;
}
}
}
return NULL;
}
static bool use_multiport(VirtIOSerial *vser)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
return virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT);
}
static size_t write_to_port(VirtIOSerialPort *port,
const uint8_t *buf, size_t size)
{
VirtQueueElement elem;
VirtQueue *vq;
size_t offset;
vq = port->ivq;
if (!virtio_queue_ready(vq)) {
return 0;
}
offset = 0;
while (offset < size) {
size_t len;
if (!virtqueue_pop(vq, &elem)) {
break;
}
len = iov_from_buf(elem.in_sg, elem.in_num, 0,
buf + offset, size - offset);
offset += len;
virtqueue_push(vq, &elem, len);
}
virtio_notify(VIRTIO_DEVICE(port->vser), vq);
return offset;
}
static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev)
{
VirtQueueElement elem;
if (!virtio_queue_ready(vq)) {
return;
}
while (virtqueue_pop(vq, &elem)) {
virtqueue_push(vq, &elem, 0);
}
virtio_notify(vdev, vq);
}
static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
VirtIODevice *vdev)
{
VirtIOSerialPortClass *vsc;
assert(port);
assert(virtio_queue_ready(vq));
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
while (!port->throttled) {
unsigned int i;
/* Pop an elem only if we haven't left off a previous one mid-way */
if (!port->elem.out_num) {
if (!virtqueue_pop(vq, &port->elem)) {
break;
}
port->iov_idx = 0;
port->iov_offset = 0;
}
for (i = port->iov_idx; i < port->elem.out_num; i++) {
size_t buf_size;
ssize_t ret;
buf_size = port->elem.out_sg[i].iov_len - port->iov_offset;
ret = vsc->have_data(port,
port->elem.out_sg[i].iov_base
+ port->iov_offset,
buf_size);
if (port->throttled) {
port->iov_idx = i;
if (ret > 0) {
port->iov_offset += ret;
}
break;
}
port->iov_offset = 0;
}
if (port->throttled) {
break;
}
virtqueue_push(vq, &port->elem, 0);
port->elem.out_num = 0;
}
virtio_notify(vdev, vq);
}
static void flush_queued_data(VirtIOSerialPort *port)
{
assert(port);
if (!virtio_queue_ready(port->ovq)) {
return;
}
do_flush_queued_data(port, port->ovq, VIRTIO_DEVICE(port->vser));
}
static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len)
{
VirtQueueElement elem;
VirtQueue *vq;
vq = vser->c_ivq;
if (!virtio_queue_ready(vq)) {
return 0;
}
if (!virtqueue_pop(vq, &elem)) {
return 0;
}
memcpy(elem.in_sg[0].iov_base, buf, len);
virtqueue_push(vq, &elem, len);
virtio_notify(VIRTIO_DEVICE(vser), vq);
return len;
}
static size_t send_control_event(VirtIOSerial *vser, uint32_t port_id,
uint16_t event, uint16_t value)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
struct virtio_console_control cpkt;
virtio_stl_p(vdev, &cpkt.id, port_id);
virtio_stw_p(vdev, &cpkt.event, event);
virtio_stw_p(vdev, &cpkt.value, value);
trace_virtio_serial_send_control_event(port_id, event, value);
return send_control_msg(vser, &cpkt, sizeof(cpkt));
}
/* Functions for use inside qemu to open and read from/write to ports */
int virtio_serial_open(VirtIOSerialPort *port)
{
/* Don't allow opening an already-open port */
if (port->host_connected) {
return 0;
}
/* Send port open notification to the guest */
port->host_connected = true;
send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 1);
return 0;
}
int virtio_serial_close(VirtIOSerialPort *port)
{
port->host_connected = false;
/*
* If there's any data the guest sent which the app didn't
* consume, reset the throttling flag and discard the data.
*/
port->throttled = false;
discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser));
send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 0);
return 0;
}
/* Individual ports/apps call this function to write to the guest. */
ssize_t virtio_serial_write(VirtIOSerialPort *port, const uint8_t *buf,
size_t size)
{
if (!port || !port->host_connected || !port->guest_connected) {
return 0;
}
return write_to_port(port, buf, size);
}
/*
* Readiness of the guest to accept data on a port.
* Returns max. data the guest can receive
*/
size_t virtio_serial_guest_ready(VirtIOSerialPort *port)
{
VirtIODevice *vdev = VIRTIO_DEVICE(port->vser);
VirtQueue *vq = port->ivq;
unsigned int bytes;
if (!virtio_queue_ready(vq) ||
!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) ||
virtio_queue_empty(vq)) {
return 0;
}
if (use_multiport(port->vser) && !port->guest_connected) {
return 0;
}
virtqueue_get_avail_bytes(vq, &bytes, NULL, 4096, 0);
return bytes;
}
static void flush_queued_data_bh(void *opaque)
{
VirtIOSerialPort *port = opaque;
flush_queued_data(port);
}
void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle)
{
if (!port) {
return;
}
trace_virtio_serial_throttle_port(port->id, throttle);
port->throttled = throttle;
if (throttle) {
return;
}
qemu_bh_schedule(port->bh);
}
/* Guest wants to notify us of some event */
static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
struct VirtIOSerialPort *port;
VirtIOSerialPortClass *vsc;
struct virtio_console_control cpkt, *gcpkt;
uint8_t *buffer;
size_t buffer_len;
gcpkt = buf;
if (len < sizeof(cpkt)) {
/* The guest sent an invalid control packet */
return;
}
cpkt.event = virtio_lduw_p(vdev, &gcpkt->event);
cpkt.value = virtio_lduw_p(vdev, &gcpkt->value);
trace_virtio_serial_handle_control_message(cpkt.event, cpkt.value);
if (cpkt.event == VIRTIO_CONSOLE_DEVICE_READY) {
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding device %s",
vser->bus.qbus.name);
return;
}
/*
* The device is up, we can now tell the device about all the
* ports we have here.
*/
QTAILQ_FOREACH(port, &vser->ports, next) {
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_ADD, 1);
}
return;
}
port = find_port_by_id(vser, virtio_ldl_p(vdev, &gcpkt->id));
if (!port) {
error_report("virtio-serial-bus: Unexpected port id %u for device %s",
virtio_ldl_p(vdev, &gcpkt->id), vser->bus.qbus.name);
return;
}
trace_virtio_serial_handle_control_message_port(port->id);
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
switch(cpkt.event) {
case VIRTIO_CONSOLE_PORT_READY:
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding port %u for device %s",
port->id, vser->bus.qbus.name);
break;
}
/*
* Now that we know the guest asked for the port name, we're
* sure the guest has initialised whatever state is necessary
* for this port. Now's a good time to let the guest know if
* this port is a console port so that the guest can hook it
* up to hvc.
*/
if (vsc->is_console) {
send_control_event(vser, port->id, VIRTIO_CONSOLE_CONSOLE_PORT, 1);
}
if (port->name) {
virtio_stl_p(vdev, &cpkt.id, port->id);
virtio_stw_p(vdev, &cpkt.event, VIRTIO_CONSOLE_PORT_NAME);
virtio_stw_p(vdev, &cpkt.value, 1);
buffer_len = sizeof(cpkt) + strlen(port->name) + 1;
buffer = g_malloc(buffer_len);
memcpy(buffer, &cpkt, sizeof(cpkt));
memcpy(buffer + sizeof(cpkt), port->name, strlen(port->name));
buffer[buffer_len - 1] = 0;
send_control_msg(vser, buffer, buffer_len);
g_free(buffer);
}
if (port->host_connected) {
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 1);
}
/*
* When the guest has asked us for this information it means
* the guest is all setup and has its virtqueues
* initialised. If some app is interested in knowing about
* this event, let it know.
*/
if (vsc->guest_ready) {
vsc->guest_ready(port);
}
break;
case VIRTIO_CONSOLE_PORT_OPEN:
port->guest_connected = cpkt.value;
if (vsc->set_guest_connected) {
/* Send the guest opened notification if an app is interested */
vsc->set_guest_connected(port, cpkt.value);
}
break;
}
}
static void control_in(VirtIODevice *vdev, VirtQueue *vq)
{
}
static void control_out(VirtIODevice *vdev, VirtQueue *vq)
{
VirtQueueElement elem;
VirtIOSerial *vser;
uint8_t *buf;
size_t len;
vser = VIRTIO_SERIAL(vdev);
len = 0;
buf = NULL;
while (virtqueue_pop(vq, &elem)) {
size_t cur_len;
cur_len = iov_size(elem.out_sg, elem.out_num);
/*
* Allocate a new buf only if we didn't have one previously or
* if the size of the buf differs
*/
if (cur_len > len) {
g_free(buf);
buf = g_malloc(cur_len);
len = cur_len;
}
iov_to_buf(elem.out_sg, elem.out_num, 0, buf, cur_len);
handle_control_message(vser, buf, cur_len);
virtqueue_push(vq, &elem, 0);
}
g_free(buf);
virtio_notify(vdev, vq);
}
/* Guest wrote something to some port. */
static void handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSerial *vser;
VirtIOSerialPort *port;
vser = VIRTIO_SERIAL(vdev);
port = find_port_by_vq(vser, vq);
if (!port || !port->host_connected) {
discard_vq_data(vq, vdev);
return;
}
if (!port->throttled) {
do_flush_queued_data(port, vq, vdev);
return;
}
}
static void handle_input(VirtIODevice *vdev, VirtQueue *vq)
{
/*
* Users of virtio-serial would like to know when guest becomes
* writable again -- i.e. if a vq had stuff queued up and the
* guest wasn't reading at all, the host would not be able to
* write to the vq anymore. Once the guest reads off something,
* we can start queueing things up again. However, this call is
* made for each buffer addition by the guest -- even though free
* buffers existed prior to the current buffer addition. This is
* done so as not to maintain previous state, which will need
* additional live-migration-related changes.
*/
VirtIOSerial *vser;
VirtIOSerialPort *port;
VirtIOSerialPortClass *vsc;
vser = VIRTIO_SERIAL(vdev);
port = find_port_by_vq(vser, vq);
if (!port) {
return;
}
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
/*
* If guest_connected is false, this call is being made by the
* early-boot queueing up of descriptors, which is just noise for
* the host apps -- don't disturb them in that case.
*/
if (port->guest_connected && port->host_connected && vsc->guest_writable) {
vsc->guest_writable(port);
}
}
static uint64_t get_features(VirtIODevice *vdev, uint64_t features)
{
VirtIOSerial *vser;
vser = VIRTIO_SERIAL(vdev);
if (vser->bus.max_nr_ports > 1) {
virtio_add_feature(&features, VIRTIO_CONSOLE_F_MULTIPORT);
}
return features;
}
/* Guest requested config info */
static void get_config(VirtIODevice *vdev, uint8_t *config_data)
{
VirtIOSerial *vser = VIRTIO_SERIAL(vdev);
struct virtio_console_config *config =
(struct virtio_console_config *)config_data;
config->cols = 0;
config->rows = 0;
config->max_nr_ports = virtio_tswap32(vdev,
vser->serial.max_virtserial_ports);
}
static void guest_reset(VirtIOSerial *vser)
{
VirtIOSerialPort *port;
VirtIOSerialPortClass *vsc;
QTAILQ_FOREACH(port, &vser->ports, next) {
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
if (port->guest_connected) {
port->guest_connected = false;
if (vsc->set_guest_connected) {
vsc->set_guest_connected(port, false);
}
}
}
}
static void set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOSerial *vser;
VirtIOSerialPort *port;
vser = VIRTIO_SERIAL(vdev);
port = find_port_by_id(vser, 0);
if (port && !use_multiport(port->vser)
&& (status & VIRTIO_CONFIG_S_DRIVER_OK)) {
/*
* Non-multiport guests won't be able to tell us guest
* open/close status. Such guests can only have a port at id
* 0, so set guest_connected for such ports as soon as guest
* is up.
*/
port->guest_connected = true;
}
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
guest_reset(vser);
}
}
static void vser_reset(VirtIODevice *vdev)
{
VirtIOSerial *vser;
vser = VIRTIO_SERIAL(vdev);
guest_reset(vser);
}
static void virtio_serial_save(QEMUFile *f, void *opaque)
{
/* The virtio device */
virtio_save(VIRTIO_DEVICE(opaque), f);
}
static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIOSerial *s = VIRTIO_SERIAL(vdev);
VirtIOSerialPort *port;
uint32_t nr_active_ports;
unsigned int i, max_nr_ports;
struct virtio_console_config config;
/* The config space (ignored on the far end in current versions) */
get_config(vdev, (uint8_t *)&config);
qemu_put_be16s(f, &config.cols);
qemu_put_be16s(f, &config.rows);
qemu_put_be32s(f, &config.max_nr_ports);
/* The ports map */
max_nr_ports = s->serial.max_virtserial_ports;
for (i = 0; i < (max_nr_ports + 31) / 32; i++) {
qemu_put_be32s(f, &s->ports_map[i]);
}
/* Ports */
nr_active_ports = 0;
QTAILQ_FOREACH(port, &s->ports, next) {
nr_active_ports++;
}
qemu_put_be32s(f, &nr_active_ports);
/*
* Items in struct VirtIOSerialPort.
*/
QTAILQ_FOREACH(port, &s->ports, next) {
uint32_t elem_popped;
qemu_put_be32s(f, &port->id);
qemu_put_byte(f, port->guest_connected);
qemu_put_byte(f, port->host_connected);
elem_popped = 0;
if (port->elem.out_num) {
elem_popped = 1;
}
qemu_put_be32s(f, &elem_popped);
if (elem_popped) {
qemu_put_be32s(f, &port->iov_idx);
qemu_put_be64s(f, &port->iov_offset);
qemu_put_buffer(f, (unsigned char *)&port->elem,
sizeof(port->elem));
}
}
}
static void virtio_serial_post_load_timer_cb(void *opaque)
{
uint32_t i;
VirtIOSerial *s = VIRTIO_SERIAL(opaque);
VirtIOSerialPort *port;
uint8_t host_connected;
VirtIOSerialPortClass *vsc;
if (!s->post_load) {
return;
}
for (i = 0 ; i < s->post_load->nr_active_ports; ++i) {
port = s->post_load->connected[i].port;
host_connected = s->post_load->connected[i].host_connected;
if (host_connected != port->host_connected) {
/*
* We have to let the guest know of the host connection
* status change
*/
send_control_event(s, port->id, VIRTIO_CONSOLE_PORT_OPEN,
port->host_connected);
}
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
if (vsc->set_guest_connected) {
vsc->set_guest_connected(port, port->guest_connected);
}
}
g_free(s->post_load->connected);
timer_free(s->post_load->timer);
g_free(s->post_load);
s->post_load = NULL;
}
static int fetch_active_ports_list(QEMUFile *f, int version_id,
VirtIOSerial *s, uint32_t nr_active_ports)
{
uint32_t i;
s->post_load = g_malloc0(sizeof(*s->post_load));
s->post_load->nr_active_ports = nr_active_ports;
s->post_load->connected =
g_malloc0(sizeof(*s->post_load->connected) * nr_active_ports);
s->post_load->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_serial_post_load_timer_cb,
s);
/* Items in struct VirtIOSerialPort */
for (i = 0; i < nr_active_ports; i++) {
VirtIOSerialPort *port;
uint32_t id;
id = qemu_get_be32(f);
port = find_port_by_id(s, id);
if (!port) {
return -EINVAL;
}
port->guest_connected = qemu_get_byte(f);
s->post_load->connected[i].port = port;
s->post_load->connected[i].host_connected = qemu_get_byte(f);
if (version_id > 2) {
uint32_t elem_popped;
qemu_get_be32s(f, &elem_popped);
if (elem_popped) {
qemu_get_be32s(f, &port->iov_idx);
qemu_get_be64s(f, &port->iov_offset);
qemu_get_buffer(f, (unsigned char *)&port->elem,
sizeof(port->elem));
virtqueue_map_sg(port->elem.in_sg, port->elem.in_addr,
port->elem.in_num, 1);
virtqueue_map_sg(port->elem.out_sg, port->elem.out_addr,
port->elem.out_num, 1);
/*
* Port was throttled on source machine. Let's
* unthrottle it here so data starts flowing again.
*/
virtio_serial_throttle_port(port, false);
}
}
}
timer_mod(s->post_load->timer, 1);
return 0;
}
static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id)
{
if (version_id > 3) {
return -EINVAL;
}
/* The virtio device */
return virtio_load(VIRTIO_DEVICE(opaque), f, version_id);
}
static int virtio_serial_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
VirtIOSerial *s = VIRTIO_SERIAL(vdev);
uint32_t max_nr_ports, nr_active_ports, ports_map;
unsigned int i;
int ret;
uint32_t tmp;
if (version_id < 2) {
return 0;
}
/* Unused */
qemu_get_be16s(f, (uint16_t *) &tmp);
qemu_get_be16s(f, (uint16_t *) &tmp);
qemu_get_be32s(f, &tmp);
max_nr_ports = s->serial.max_virtserial_ports;
for (i = 0; i < (max_nr_ports + 31) / 32; i++) {
qemu_get_be32s(f, &ports_map);
if (ports_map != s->ports_map[i]) {
/*
* Ports active on source and destination don't
* match. Fail migration.
*/
return -EINVAL;
}
}
qemu_get_be32s(f, &nr_active_ports);
if (nr_active_ports) {
ret = fetch_active_ports_list(f, version_id, s, nr_active_ports);
if (ret) {
return ret;
}
}
return 0;
}
static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent);
static Property virtser_props[] = {
DEFINE_PROP_UINT32("nr", VirtIOSerialPort, id, VIRTIO_CONSOLE_BAD_ID),
DEFINE_PROP_STRING("name", VirtIOSerialPort, name),
DEFINE_PROP_END_OF_LIST()
};
#define TYPE_VIRTIO_SERIAL_BUS "virtio-serial-bus"
#define VIRTIO_SERIAL_BUS(obj) \
OBJECT_CHECK(VirtIOSerialBus, (obj), TYPE_VIRTIO_SERIAL_BUS)
static void virtser_bus_class_init(ObjectClass *klass, void *data)
{
BusClass *k = BUS_CLASS(klass);
k->print_dev = virtser_bus_dev_print;
}
static const TypeInfo virtser_bus_info = {
.name = TYPE_VIRTIO_SERIAL_BUS,
.parent = TYPE_BUS,
.instance_size = sizeof(VirtIOSerialBus),
.class_init = virtser_bus_class_init,
};
static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent)
{
VirtIOSerialPort *port = DO_UPCAST(VirtIOSerialPort, dev, qdev);
monitor_printf(mon, "%*sport %d, guest %s, host %s, throttle %s\n",
indent, "", port->id,
port->guest_connected ? "on" : "off",
port->host_connected ? "on" : "off",
port->throttled ? "on" : "off");
}
/* This function is only used if a port id is not provided by the user */
static uint32_t find_free_port_id(VirtIOSerial *vser)
{
unsigned int i, max_nr_ports;
max_nr_ports = vser->serial.max_virtserial_ports;
for (i = 0; i < (max_nr_ports + 31) / 32; i++) {
uint32_t map, zeroes;
map = vser->ports_map[i];
zeroes = ctz32(~map);
if (zeroes != 32) {
return zeroes + i * 32;
}
}
return VIRTIO_CONSOLE_BAD_ID;
}
static void mark_port_added(VirtIOSerial *vser, uint32_t port_id)
{
unsigned int i;
i = port_id / 32;
vser->ports_map[i] |= 1U << (port_id % 32);
}
static void add_port(VirtIOSerial *vser, uint32_t port_id)
{
mark_port_added(vser, port_id);
send_control_event(vser, port_id, VIRTIO_CONSOLE_PORT_ADD, 1);
}
static void remove_port(VirtIOSerial *vser, uint32_t port_id)
{
VirtIOSerialPort *port;
/*
* Don't mark port 0 removed -- we explicitly reserve it for
* backward compat with older guests, ensure a virtconsole device
* unplug retains the reservation.
*/
if (port_id) {
unsigned int i;
i = port_id / 32;
vser->ports_map[i] &= ~(1U << (port_id % 32));
}
port = find_port_by_id(vser, port_id);
/*
* This function is only called from qdev's unplug callback; if we
* get a NULL port here, we're in trouble.
*/
assert(port);
/* Flush out any unconsumed buffers first */
discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser));
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_REMOVE, 1);
}
static void virtser_port_device_realize(DeviceState *dev, Error **errp)
{
VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
VirtIOSerialBus *bus = VIRTIO_SERIAL_BUS(qdev_get_parent_bus(dev));
int max_nr_ports;
bool plugging_port0;
Error *err = NULL;
port->vser = bus->vser;
port->bh = qemu_bh_new(flush_queued_data_bh, port);
assert(vsc->have_data);
/*
* Is the first console port we're seeing? If so, put it up at
* location 0. This is done for backward compatibility (old
* kernel, new qemu).
*/
plugging_port0 = vsc->is_console && !find_port_by_id(port->vser, 0);
if (find_port_by_id(port->vser, port->id)) {
error_setg(errp, "virtio-serial-bus: A port already exists at id %u",
port->id);
return;
}
if (port->name != NULL && find_port_by_name(port->name)) {
error_setg(errp, "virtio-serial-bus: A port already exists by name %s",
port->name);
return;
}
if (port->id == VIRTIO_CONSOLE_BAD_ID) {
if (plugging_port0) {
port->id = 0;
} else {
port->id = find_free_port_id(port->vser);
if (port->id == VIRTIO_CONSOLE_BAD_ID) {
error_setg(errp, "virtio-serial-bus: Maximum port limit for "
"this device reached");
return;
}
}
}
max_nr_ports = port->vser->serial.max_virtserial_ports;
if (port->id >= max_nr_ports) {
error_setg(errp, "virtio-serial-bus: Out-of-range port id specified, "
"max. allowed: %u", max_nr_ports - 1);
return;
}
vsc->realize(dev, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
port->elem.out_num = 0;
}
static void virtser_port_device_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
QTAILQ_INSERT_TAIL(&port->vser->ports, port, next);
port->ivq = port->vser->ivqs[port->id];
port->ovq = port->vser->ovqs[port->id];
add_port(port->vser, port->id);
/* Send an update to the guest about this new port added */
virtio_notify_config(VIRTIO_DEVICE(hotplug_dev));
}
static void virtser_port_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(dev);
VirtIOSerial *vser = port->vser;
qemu_bh_delete(port->bh);
remove_port(port->vser, port->id);
QTAILQ_REMOVE(&vser->ports, port, next);
if (vsc->unrealize) {
vsc->unrealize(dev, errp);
}
}
static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSerial *vser = VIRTIO_SERIAL(dev);
uint32_t i, max_supported_ports;
if (!vser->serial.max_virtserial_ports) {
error_setg(errp, "Maximum number of serial ports not specified");
return;
}
/* Each port takes 2 queues, and one pair is for the control queue */
max_supported_ports = VIRTIO_QUEUE_MAX / 2 - 1;
if (vser->serial.max_virtserial_ports > max_supported_ports) {
error_setg(errp, "maximum ports supported: %u", max_supported_ports);
return;
}
/* We don't support emergency write, skip it for now. */
/* TODO: cleaner fix, depending on host features. */
virtio_init(vdev, "virtio-serial", VIRTIO_ID_CONSOLE,
offsetof(struct virtio_console_config, emerg_wr));
/* Spawn a new virtio-serial bus on which the ports will ride as devices */
qbus_create_inplace(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS,
dev, vdev->bus_name);
qbus_set_hotplug_handler(BUS(&vser->bus), DEVICE(vser), errp);
vser->bus.vser = vser;
QTAILQ_INIT(&vser->ports);
vser->bus.max_nr_ports = vser->serial.max_virtserial_ports;
vser->ivqs = g_malloc(vser->serial.max_virtserial_ports
* sizeof(VirtQueue *));
vser->ovqs = g_malloc(vser->serial.max_virtserial_ports
* sizeof(VirtQueue *));
/* Add a queue for host to guest transfers for port 0 (backward compat) */
vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input);
/* Add a queue for guest to host transfers for port 0 (backward compat) */
vser->ovqs[0] = virtio_add_queue(vdev, 128, handle_output);
/* TODO: host to guest notifications can get dropped
* if the queue fills up. Implement queueing in host,
* this might also make it possible to reduce the control
* queue size: as guest preposts buffers there,
* this will save 4Kbyte of guest memory per entry. */
/* control queue: host to guest */
vser->c_ivq = virtio_add_queue(vdev, 32, control_in);
/* control queue: guest to host */
vser->c_ovq = virtio_add_queue(vdev, 32, control_out);
for (i = 1; i < vser->bus.max_nr_ports; i++) {
/* Add a per-port queue for host to guest transfers */
vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input);
/* Add a per-per queue for guest to host transfers */
vser->ovqs[i] = virtio_add_queue(vdev, 128, handle_output);
}
vser->ports_map = g_malloc0(((vser->serial.max_virtserial_ports + 31) / 32)
* sizeof(vser->ports_map[0]));
/*
* Reserve location 0 for a console port for backward compat
* (old kernel, new qemu)
*/
mark_port_added(vser, 0);
vser->post_load = NULL;
/*
* Register for the savevm section with the virtio-console name
* to preserve backward compat
*/
register_savevm(dev, "virtio-console", -1, 3, virtio_serial_save,
virtio_serial_load, vser);
QLIST_INSERT_HEAD(&vserdevices.devices, vser, next);
}
static void virtio_serial_port_class_init(ObjectClass *klass, void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_INPUT, k->categories);
k->bus_type = TYPE_VIRTIO_SERIAL_BUS;
k->realize = virtser_port_device_realize;
k->unrealize = virtser_port_device_unrealize;
k->props = virtser_props;
}
static const TypeInfo virtio_serial_port_type_info = {
.name = TYPE_VIRTIO_SERIAL_PORT,
.parent = TYPE_DEVICE,
.instance_size = sizeof(VirtIOSerialPort),
.abstract = true,
.class_size = sizeof(VirtIOSerialPortClass),
.class_init = virtio_serial_port_class_init,
};
static void virtio_serial_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSerial *vser = VIRTIO_SERIAL(dev);
QLIST_REMOVE(vser, next);
unregister_savevm(dev, "virtio-console", vser);
g_free(vser->ivqs);
g_free(vser->ovqs);
g_free(vser->ports_map);
if (vser->post_load) {
g_free(vser->post_load->connected);
timer_del(vser->post_load->timer);
timer_free(vser->post_load->timer);
g_free(vser->post_load);
}
virtio_cleanup(vdev);
}
static Property virtio_serial_properties[] = {
DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports,
31),
DEFINE_PROP_END_OF_LIST(),
};
static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
QLIST_INIT(&vserdevices.devices);
dc->props = virtio_serial_properties;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
vdc->realize = virtio_serial_device_realize;
vdc->unrealize = virtio_serial_device_unrealize;
vdc->get_features = get_features;
vdc->get_config = get_config;
vdc->set_status = set_status;
vdc->reset = vser_reset;
vdc->save = virtio_serial_save_device;
vdc->load = virtio_serial_load_device;
hc->plug = virtser_port_device_plug;
hc->unplug = qdev_simple_device_unplug_cb;
}
static const TypeInfo virtio_device_info = {
.name = TYPE_VIRTIO_SERIAL,
.parent = TYPE_VIRTIO_DEVICE,
.instance_size = sizeof(VirtIOSerial),
.class_init = virtio_serial_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
};
static void virtio_serial_register_types(void)
{
type_register_static(&virtser_bus_info);
type_register_static(&virtio_serial_port_type_info);
type_register_static(&virtio_device_info);
}
type_init(virtio_serial_register_types)
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_1706_0 |
crossvul-cpp_data_good_4522_0 | /*
* Copyright (c) 2003 Sun Microsystems, Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistribution of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistribution in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of Sun Microsystems, Inc. or the names of
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED.
* SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE
* FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING
* OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL
* SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA,
* OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
* PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF
* LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE,
* EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <ipmitool/ipmi.h>
#include <ipmitool/log.h>
#include <ipmitool/helper.h>
#include <ipmitool/ipmi_cc.h>
#include <ipmitool/ipmi_intf.h>
#include <ipmitool/ipmi_fru.h>
#include <ipmitool/ipmi_mc.h>
#include <ipmitool/ipmi_sdr.h>
#include <ipmitool/ipmi_strings.h> /* IANA id strings */
#include <ipmitool/ipmi_time.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#if HAVE_CONFIG_H
# include <config.h>
#endif
#define FRU_MULTIREC_CHUNK_SIZE (255 + sizeof(struct fru_multirec_header))
#define FRU_FIELD_VALID(a) (a && a[0])
static const char *section_id[4] = {
"Internal Use Section",
"Chassis Section",
"Board Section",
"Product Section"
};
static const char * combined_voltage_desc[] = {
"12 V",
"-12 V",
"5 V",
"3.3 V"
};
static const char * chassis_type_desc[] = {
"Unspecified",
"Other",
"Unknown",
"Desktop",
"Low Profile Desktop",
"Pizza Box",
"Mini Tower",
"Tower",
"Portable",
"LapTop",
"Notebook",
"Hand Held",
"Docking Station",
"All in One",
"Sub Notebook",
"Space-saving",
"Lunch Box",
"Main Server Chassis",
"Expansion Chassis",
"SubChassis",
"Bus Expansion Chassis",
"Peripheral Chassis",
"RAID Chassis",
"Rack Mount Chassis",
"Sealed-case PC",
"Multi-system Chassis",
"CompactPCI",
"AdvancedTCA",
"Blade",
"Blade Enclosure"
};
static inline bool fru_cc_rq2big(int code) {
return (code == IPMI_CC_REQ_DATA_INV_LENGTH
|| code == IPMI_CC_REQ_DATA_FIELD_EXCEED
|| code == IPMI_CC_CANT_RET_NUM_REQ_BYTES);
}
/* From lib/dimm_spd.c: */
int
ipmi_spd_print_fru(struct ipmi_intf * intf, uint8_t id);
extern int verbose;
static void ipmi_fru_read_to_bin(struct ipmi_intf * intf, char * pFileName, uint8_t fruId);
static void ipmi_fru_write_from_bin(struct ipmi_intf * intf, char * pFileName, uint8_t fruId);
static int ipmi_fru_upg_ekeying(struct ipmi_intf * intf, char * pFileName, uint8_t fruId);
static int ipmi_fru_get_multirec_location_from_fru(struct ipmi_intf * intf, uint8_t fruId,
struct fru_info *pFruInfo, uint32_t * pRetLocation,
uint32_t * pRetSize);
static int ipmi_fru_get_multirec_from_file(char * pFileName, uint8_t * pBufArea,
uint32_t size, uint32_t offset);
static int ipmi_fru_get_multirec_size_from_file(char * pFileName, uint32_t * pSize, uint32_t * pOffset);
int ipmi_fru_get_adjust_size_from_buffer(uint8_t *pBufArea, uint32_t *pSize);
static void ipmi_fru_picmg_ext_print(uint8_t * fru_data, int off, int length);
static int ipmi_fru_set_field_string(struct ipmi_intf * intf, unsigned
char fruId, uint8_t f_type, uint8_t f_index, char *f_string);
static int
ipmi_fru_set_field_string_rebuild(struct ipmi_intf * intf, uint8_t fruId,
struct fru_info fru, struct fru_header header,
uint8_t f_type, uint8_t f_index, char *f_string);
static void
fru_area_print_multirec_bloc(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset);
int
read_fru_area(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint32_t offset, uint32_t length, uint8_t *frubuf);
void free_fru_bloc(t_ipmi_fru_bloc *bloc);
/* get_fru_area_str - Parse FRU area string from raw data
*
* @data: raw FRU data
* @offset: offset into data for area
*
* returns pointer to FRU area string
*/
char * get_fru_area_str(uint8_t * data, uint32_t * offset)
{
static const char bcd_plus[] = "0123456789 -.:,_";
char * str;
int len, off, size, i, j, k, typecode, char_idx;
union {
uint32_t bits;
char chars[4];
} u;
size = 0;
off = *offset;
/* bits 6:7 contain format */
typecode = ((data[off] & 0xC0) >> 6);
// printf("Typecode:%i\n", typecode);
/* bits 0:5 contain length */
len = data[off++];
len &= 0x3f;
switch (typecode) {
case 0: /* 00b: binary/unspecified */
case 1: /* 01b: BCD plus */
/* hex dump or BCD -> 2x length */
size = (len * 2);
break;
case 2: /* 10b: 6-bit ASCII */
/* 4 chars per group of 1-3 bytes */
size = (((len * 4 + 2) / 3) & ~3);
break;
case 3: /* 11b: 8-bit ASCII */
/* no length adjustment */
size = len;
break;
}
if (size < 1) {
*offset = off;
return NULL;
}
str = malloc(size+1);
if (!str)
return NULL;
memset(str, 0, size+1);
if (size == 0) {
str[0] = '\0';
*offset = off;
return str;
}
switch (typecode) {
case 0: /* Binary */
strncpy(str, buf2str(&data[off], len), size);
break;
case 1: /* BCD plus */
for (k = 0; k < size; k++)
str[k] = bcd_plus[((data[off + k / 2] >> ((k % 2) ? 0 : 4)) & 0x0f)];
str[k] = '\0';
break;
case 2: /* 6-bit ASCII */
for (i = j = 0; i < len; i += 3) {
u.bits = 0;
k = ((len - i) < 3 ? (len - i) : 3);
#if WORDS_BIGENDIAN
u.chars[3] = data[off+i];
u.chars[2] = (k > 1 ? data[off+i+1] : 0);
u.chars[1] = (k > 2 ? data[off+i+2] : 0);
char_idx = 3;
#else
memcpy((void *)&u.bits, &data[off+i], k);
char_idx = 0;
#endif
for (k=0; k<4; k++) {
str[j++] = ((u.chars[char_idx] & 0x3f) + 0x20);
u.bits >>= 6;
}
}
str[j] = '\0';
break;
case 3:
memcpy(str, &data[off], size);
str[size] = '\0';
break;
}
off += len;
*offset = off;
return str;
}
/* is_valid_filename - checks file/path supplied by user
*
* input_filename - user input string
*
* returns 0 if path is ok
* returns -1 if path is NULL
* returns -2 if path is too short
* returns -3 if path is too long
*/
int
is_valid_filename(const char *input_filename)
{
if (!input_filename) {
lprintf(LOG_ERR, "ERROR: NULL pointer passed.");
return -1;
}
if (strlen(input_filename) < 1) {
lprintf(LOG_ERR, "File/path is invalid.");
return -2;
}
if (strlen(input_filename) >= 512) {
lprintf(LOG_ERR, "File/path must be shorter than 512 bytes.");
return -3;
}
return 0;
} /* is_valid_filename() */
/* build_fru_bloc - build fru bloc for write protection
*
* @intf: ipmi interface
* @fru_info: information about FRU device
* @id : Fru id
* @soffset : Source offset (from buffer)
* @doffset : Destination offset (in device)
* @length : Size of data to write (in bytes)
* @pFrubuf : Pointer on data to write
*
* returns 0 on success
* returns -1 on error
*/
#define FRU_NUM_BLOC_COMMON_HEADER 6
t_ipmi_fru_bloc *
build_fru_bloc(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id)
{
t_ipmi_fru_bloc * p_first, * p_bloc, * p_new;
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_header header;
struct fru_multirec_header rec_hdr;
uint8_t msg_data[4];
uint32_t off;
uint16_t i;
/*
* get COMMON Header format
*/
msg_data[0] = id;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_ERR, " Device not present (No Response)");
return NULL;
}
if (rsp->ccode) {
lprintf(LOG_ERR," Device not present (%s)",
val2str(rsp->ccode, completion_code_vals));
return NULL;
}
if (verbose > 1) {
printbuf(rsp->data, rsp->data_len, "FRU DATA");
}
memcpy(&header, rsp->data + 1, 8);
/* verify header checksum */
if (ipmi_csum((uint8_t *)&header, 8)) {
lprintf(LOG_ERR, " Bad header checksum");
return NULL;
}
if (header.version != 1) {
lprintf(LOG_ERR, " Unknown FRU header version 0x%02x", header.version);
return NULL;
}
/******************************************
Malloc and fill up the bloc contents
*******************************************/
// Common header
p_first = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_first) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return NULL;
}
p_bloc = p_first;
p_bloc->next = NULL;
p_bloc->start= 0;
p_bloc->size = fru->size;
strcpy((char *)p_bloc->blocId, "Common Header Section");
for (i = 0; i < 4; i++) {
if (header.offsets[i]) {
p_new = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_new) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
free_fru_bloc(p_first);
return NULL;
}
p_new->next = NULL;
p_new->start = header.offsets[i] * 8;
p_new->size = fru->size - p_new->start;
strncpy((char *)p_new->blocId, section_id[i], sizeof(p_new->blocId));
/* Make sure string is null terminated */
p_new->blocId[sizeof(p_new->blocId)-1] = 0;
p_bloc->next = p_new;
p_bloc->size = p_new->start - p_bloc->start;
p_bloc = p_new;
}
}
// Multi
if (header.offset.multi) {
off = header.offset.multi * 8;
do {
/*
* check for odd offset for the case of fru devices
* accessed by words
*/
if (fru->access && (off & 1)) {
lprintf(LOG_ERR, " Unaligned offset for a block: %d", off);
/* increment offset */
off++;
break;
}
if (read_fru_area(intf, fru, id, off, 5,
(uint8_t *) &rec_hdr) < 0) {
break;
}
p_new = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_new) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
free_fru_bloc(p_first);
return NULL;
}
p_new->next = NULL;
p_new->start = off;
p_new->size = fru->size - p_new->start;
sprintf((char *)p_new->blocId, "Multi-Rec Area: Type %i",
rec_hdr.type);
p_bloc->next = p_new;
p_bloc->size = p_new->start - p_bloc->start;
p_bloc = p_new;
off += rec_hdr.len + sizeof(struct fru_multirec_header);
/* verify record header */
if (ipmi_csum((uint8_t *)&rec_hdr,
sizeof(struct fru_multirec_header))) {
/* can't reliably judge for the rest space */
break;
}
} while (!(rec_hdr.format & 0x80) && (off < fru->size));
lprintf(LOG_DEBUG,"Multi-Record area ends at: %i (%xh)", off, off);
if (fru->size > off) {
// Bloc for remaining space
p_new = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_new) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
free_fru_bloc(p_first);
return NULL;
}
p_new->next = NULL;
p_new->start = off;
p_new->size = fru->size - p_new->start;
strcpy((char *)p_new->blocId, "Unused space");
p_bloc->next = p_new;
p_bloc->size = p_new->start - p_bloc->start;
}
}
/* Dump blocs */
for(p_bloc = p_first, i = 0; p_bloc; p_bloc = p_bloc->next) {
lprintf(LOG_DEBUG ,"Bloc Numb : %i", i++);
lprintf(LOG_DEBUG ,"Bloc Id : %s", p_bloc->blocId);
lprintf(LOG_DEBUG ,"Bloc Start: %i", p_bloc->start);
lprintf(LOG_DEBUG ,"Bloc Size : %i", p_bloc->size);
lprintf(LOG_DEBUG ,"");
}
return p_first;
}
void
free_fru_bloc(t_ipmi_fru_bloc *bloc)
{
t_ipmi_fru_bloc * del;
while (bloc) {
del = bloc;
bloc = bloc->next;
free_n(&del);
}
}
/* By how many bytes to reduce a write command on a size failure. */
#define FRU_BLOCK_SZ 8
/* Baseline for a large enough piece to reduce via steps instead of bytes. */
#define FRU_AREA_MAXIMUM_BLOCK_SZ 32
/*
* write FRU[doffset:length] from the pFrubuf[soffset:length]
* rc=1 on success
**/
int
write_fru_area(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint16_t soffset, uint16_t doffset,
uint16_t length, uint8_t *pFrubuf)
{
uint16_t tmp, finish;
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[255+3];
uint16_t writeLength;
uint16_t found_bloc = 0;
finish = doffset + length; /* destination offset */
if (finish > fru->size)
{
lprintf(LOG_ERROR, "Return error");
return -1;
}
if (fru->access && ((doffset & 1) || (length & 1))) {
lprintf(LOG_ERROR, "Odd offset or length specified");
return -1;
}
t_ipmi_fru_bloc * fru_bloc = build_fru_bloc(intf, fru, id);
t_ipmi_fru_bloc * saved_fru_bloc = fru_bloc;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = SET_FRU_DATA;
req.msg.data = msg_data;
/* initialize request size only once */
if (fru->max_write_size == 0) {
uint16_t max_rq_size = ipmi_intf_get_max_request_data_size(intf);
/* validate lower bound of the maximum request data size */
if (max_rq_size <= 3) {
lprintf(LOG_ERROR, "Maximum request size is too small to send "
"a write request");
return -1;
}
/*
* Write FRU Info command returns the number of written bytes in
* a single byte field.
*/
if (max_rq_size - 3 > 255) {
/* Limit the max write size with 255 bytes. */
fru->max_write_size = 255;
} else {
/* subtract 1 byte for FRU ID an 2 bytes for offset */
fru->max_write_size = max_rq_size - 3;
}
/* check word access */
if (fru->access) {
fru->max_write_size &= ~1;
}
}
do {
uint16_t end_bloc;
uint8_t protected_bloc = 0;
/* Write per bloc, try to find the end of a bloc*/
while (fru_bloc && fru_bloc->start + fru_bloc->size <= doffset) {
fru_bloc = fru_bloc->next;
found_bloc++;
}
if (fru_bloc && fru_bloc->start + fru_bloc->size < finish) {
end_bloc = fru_bloc->start + fru_bloc->size;
} else {
end_bloc = finish;
}
/* calculate write length */
tmp = end_bloc - doffset;
/* check that write length is more than maximum request size */
if (tmp > fru->max_write_size) {
writeLength = fru->max_write_size;
} else {
writeLength = tmp;
}
/* copy fru data */
memcpy(&msg_data[3], pFrubuf + soffset, writeLength);
/* check word access */
if (fru->access) {
writeLength &= ~1;
}
tmp = doffset;
if (fru->access) {
tmp >>= 1;
}
msg_data[0] = id;
msg_data[1] = (uint8_t)tmp;
msg_data[2] = (uint8_t)(tmp >> 8);
req.msg.data_len = writeLength + 3;
if(fru_bloc) {
lprintf(LOG_INFO,"Writing %d bytes (Bloc #%i: %s)",
writeLength, found_bloc, fru_bloc->blocId);
} else {
lprintf(LOG_INFO,"Writing %d bytes", writeLength);
}
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
break;
}
if (fru_cc_rq2big(rsp->ccode)) {
if (fru->max_write_size > FRU_AREA_MAXIMUM_BLOCK_SZ) {
fru->max_write_size -= FRU_BLOCK_SZ;
lprintf(LOG_INFO, "Retrying FRU write with request size %d",
fru->max_write_size);
continue;
}
} else if (rsp->ccode == IPMI_CC_FRU_WRITE_PROTECTED_OFFSET) {
rsp->ccode = IPMI_CC_OK;
// Write protected section
protected_bloc = 1;
}
if (rsp->ccode)
break;
if (protected_bloc == 0) {
// Write OK, bloc not protected, continue
lprintf(LOG_INFO,"Wrote %d bytes", writeLength);
doffset += writeLength;
soffset += writeLength;
} else {
if(fru_bloc) {
// Bloc protected, advise user and jump over protected bloc
lprintf(LOG_INFO,
"Bloc [%s] protected at offset: %i (size %i bytes)",
fru_bloc->blocId, fru_bloc->start, fru_bloc->size);
lprintf(LOG_INFO,"Jumping over this bloc");
} else {
lprintf(LOG_INFO,
"Remaining FRU is protected following offset: %i",
doffset);
}
soffset += end_bloc - doffset;
doffset = end_bloc;
}
} while (doffset < finish);
if (saved_fru_bloc) {
free_fru_bloc(saved_fru_bloc);
}
return doffset >= finish;
}
/* read_fru_area - fill in frubuf[offset:length] from the FRU[offset:length]
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset into buffer
* @length: how much to read
* @frubuf: buffer read into
*
* returns -1 on error
* returns 0 if successful
*/
int
read_fru_area(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint32_t offset, uint32_t length, uint8_t *frubuf)
{
uint32_t off = offset;
uint32_t tmp;
uint32_t finish;
uint32_t size_left_in_buffer;
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[4];
if (offset > fru->size) {
lprintf(LOG_ERR, "Read FRU Area offset incorrect: %d > %d",
offset, fru->size);
return -1;
}
finish = offset + length;
if (finish > fru->size) {
memset(frubuf + fru->size, 0, length - fru->size);
finish = fru->size;
lprintf(LOG_NOTICE, "Read FRU Area length %d too large, "
"Adjusting to %d",
offset + length, finish - offset);
length = finish - offset;
}
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
if (fru->max_read_size == 0) {
uint16_t max_rs_size = ipmi_intf_get_max_response_data_size(intf) - 1;
/* validate lower bound of the maximum response data size */
if (max_rs_size <= 1) {
lprintf(LOG_ERROR, "Maximum response size is too small to send "
"a read request");
return -1;
}
/*
* Read FRU Info command may read up to 255 bytes of data.
*/
if (max_rs_size - 1 > 255) {
/* Limit the max read size with 255 bytes. */
fru->max_read_size = 255;
} else {
/* subtract 1 byte for bytes count */
fru->max_read_size = max_rs_size - 1;
}
/* check word access */
if (fru->access) {
fru->max_read_size &= ~1;
}
}
size_left_in_buffer = length;
do {
tmp = fru->access ? off >> 1 : off;
msg_data[0] = id;
msg_data[1] = (uint8_t)(tmp & 0xff);
msg_data[2] = (uint8_t)(tmp >> 8);
tmp = finish - off;
if (tmp > fru->max_read_size)
msg_data[3] = (uint8_t)fru->max_read_size;
else
msg_data[3] = (uint8_t)tmp;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_NOTICE, "FRU Read failed");
break;
}
if (rsp->ccode) {
/* if we get C7h or C8h or CAh return code then we requested too
* many bytes at once so try again with smaller size */
if (fru_cc_rq2big(rsp->ccode)
&& fru->max_read_size > FRU_BLOCK_SZ)
{
if (fru->max_read_size > FRU_AREA_MAXIMUM_BLOCK_SZ) {
/* subtract read length more aggressively */
fru->max_read_size -= FRU_BLOCK_SZ;
} else {
/* subtract length less aggressively */
fru->max_read_size--;
}
lprintf(LOG_INFO, "Retrying FRU read with request size %d",
fru->max_read_size);
continue;
}
lprintf(LOG_NOTICE, "FRU Read failed: %s",
val2str(rsp->ccode, completion_code_vals));
break;
}
tmp = fru->access ? rsp->data[0] << 1 : rsp->data[0];
if(rsp->data_len < 1
|| tmp > rsp->data_len - 1
|| tmp > size_left_in_buffer)
{
printf(" Not enough buffer size");
return -1;
}
memcpy(frubuf, rsp->data + 1, tmp);
off += tmp;
frubuf += tmp;
size_left_in_buffer -= tmp;
/* sometimes the size returned in the Info command
* is too large. return 0 so higher level function
* still attempts to parse what was returned */
if (tmp == 0 && off < finish) {
return 0;
}
} while (off < finish);
if (off < finish) {
return -1;
}
return 0;
}
/* read_fru_area - fill in frubuf[offset:length] from the FRU[offset:length]
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset into buffer
* @length: how much to read
* @frubuf: buffer read into
*
* returns -1 on error
* returns 0 if successful
*/
int
read_fru_area_section(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint32_t offset, uint32_t length, uint8_t *frubuf)
{
static uint32_t fru_data_rqst_size = 20;
uint32_t off = offset;
uint32_t tmp, finish;
uint32_t size_left_in_buffer;
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[4];
if (offset > fru->size) {
lprintf(LOG_ERR, "Read FRU Area offset incorrect: %d > %d",
offset, fru->size);
return -1;
}
finish = offset + length;
if (finish > fru->size) {
memset(frubuf + fru->size, 0, length - fru->size);
finish = fru->size;
lprintf(LOG_NOTICE, "Read FRU Area length %d too large, "
"Adjusting to %d",
offset + length, finish - offset);
length = finish - offset;
}
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
#ifdef LIMIT_ALL_REQUEST_SIZE
if (fru_data_rqst_size > 16)
#else
if (fru->access && fru_data_rqst_size > 16)
#endif
fru_data_rqst_size = 16;
size_left_in_buffer = length;
do {
tmp = fru->access ? off >> 1 : off;
msg_data[0] = id;
msg_data[1] = (uint8_t)(tmp & 0xff);
msg_data[2] = (uint8_t)(tmp >> 8);
tmp = finish - off;
if (tmp > fru_data_rqst_size)
msg_data[3] = (uint8_t)fru_data_rqst_size;
else
msg_data[3] = (uint8_t)tmp;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_NOTICE, "FRU Read failed");
break;
}
if (rsp->ccode) {
/* if we get C7 or C8 or CA return code then we requested too
* many bytes at once so try again with smaller size */
if (fru_cc_rq2big(rsp->ccode) && (--fru_data_rqst_size > FRU_BLOCK_SZ)) {
lprintf(LOG_INFO,
"Retrying FRU read with request size %d",
fru_data_rqst_size);
continue;
}
lprintf(LOG_NOTICE, "FRU Read failed: %s",
val2str(rsp->ccode, completion_code_vals));
break;
}
tmp = fru->access ? rsp->data[0] << 1 : rsp->data[0];
if(rsp->data_len < 1
|| tmp > rsp->data_len - 1
|| tmp > size_left_in_buffer)
{
printf(" Not enough buffer size");
return -1;
}
memcpy((frubuf + off)-offset, rsp->data + 1, tmp);
off += tmp;
size_left_in_buffer -= tmp;
/* sometimes the size returned in the Info command
* is too large. return 0 so higher level function
* still attempts to parse what was returned */
if (tmp == 0 && off < finish)
return 0;
} while (off < finish);
if (off < finish)
return -1;
return 0;
}
static void
fru_area_print_multirec_bloc(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
uint8_t * fru_data = NULL;
uint32_t i;
struct fru_multirec_header * h;
uint32_t last_off, len;
i = last_off = offset;
fru_data = malloc(fru->size + 1);
if (!fru_data) {
lprintf(LOG_ERR, " Out of memory!");
return;
}
memset(fru_data, 0, fru->size + 1);
do {
h = (struct fru_multirec_header *) (fru_data + i);
// read area in (at most) FRU_MULTIREC_CHUNK_SIZE bytes at a time
if ((last_off < (i + sizeof(*h))) || (last_off < (i + h->len)))
{
len = fru->size - last_off;
if (len > FRU_MULTIREC_CHUNK_SIZE)
len = FRU_MULTIREC_CHUNK_SIZE;
if (read_fru_area(intf, fru, id, last_off, len, fru_data) < 0)
break;
last_off += len;
}
//printf("Bloc Numb : %i\n", counter);
printf("Bloc Start: %i\n", i);
printf("Bloc Size : %i\n", h->len);
printf("\n");
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80));
i = offset;
do {
h = (struct fru_multirec_header *) (fru_data + i);
printf("Bloc Start: %i\n", i);
printf("Bloc Size : %i\n", h->len);
printf("\n");
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80));
lprintf(LOG_DEBUG ,"Multi-Record area ends at: %i (%xh)",i,i);
free_n(&fru_data);
}
/* fru_area_print_chassis - Print FRU Chassis Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_chassis(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
char * fru_area;
uint8_t * fru_data;
uint32_t fru_len, i;
uint8_t tmp[2];
size_t chassis_type;
fru_len = 0;
/* read enough to check length field */
if (read_fru_area(intf, fru, id, offset, 2, tmp) == 0) {
fru_len = 8 * tmp[1];
}
if (fru_len == 0) {
return;
}
fru_data = malloc(fru_len);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, fru_len);
/* read in the full fru */
if (read_fru_area(intf, fru, id, offset, fru_len, fru_data) < 0) {
free_n(&fru_data);
return;
}
/*
* skip first two bytes which specify
* fru area version and fru area length
*/
i = 2;
chassis_type = (fru_data[i] > ARRAY_SIZE(chassis_type_desc) - 1)
? 2
: fru_data[i];
printf(" Chassis Type : %s\n", chassis_type_desc[chassis_type]);
i++;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Chassis Part Number : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Chassis Serial : %s\n", fru_area);
}
free_n(&fru_area);
}
/* read any extra fields */
while ((i < fru_len) && (fru_data[i] != FRU_END_OF_FIELDS)) {
int j = i;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Chassis Extra : %s\n", fru_area);
}
free_n(&fru_area);
}
if (i == j) {
break;
}
}
free_n(&fru_data);
}
/* fru_area_print_board - Print FRU Board Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_board(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
char * fru_area;
uint8_t * fru_data;
uint32_t fru_len;
uint32_t i;
time_t ts;
uint8_t tmp[2];
fru_len = 0;
/* read enough to check length field */
if (read_fru_area(intf, fru, id, offset, 2, tmp) == 0) {
fru_len = 8 * tmp[1];
}
if (fru_len <= 0) {
return;
}
fru_data = malloc(fru_len);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, fru_len);
/* read in the full fru */
if (read_fru_area(intf, fru, id, offset, fru_len, fru_data) < 0) {
free_n(&fru_data);
return;
}
/*
* skip first three bytes which specify
* fru area version, fru area length
* and fru board language
*/
i = 3;
ts = ipmi_fru2time_t(&fru_data[i]);
printf(" Board Mfg Date : %s\n", ipmi_timestamp_string(ts));
i += 3; /* skip mfg. date time */
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Mfg : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Product : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Serial : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Part Number : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0 && verbose > 0) {
printf(" Board FRU ID : %s\n", fru_area);
}
free_n(&fru_area);
}
/* read any extra fields */
while ((i < fru_len) && (fru_data[i] != FRU_END_OF_FIELDS)) {
int j = i;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Extra : %s\n", fru_area);
}
free_n(&fru_area);
}
if (i == j)
break;
}
free_n(&fru_data);
}
/* fru_area_print_product - Print FRU Product Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_product(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
char * fru_area;
uint8_t * fru_data;
uint32_t fru_len, i;
uint8_t tmp[2];
fru_len = 0;
/* read enough to check length field */
if (read_fru_area(intf, fru, id, offset, 2, tmp) == 0) {
fru_len = 8 * tmp[1];
}
if (fru_len == 0) {
return;
}
fru_data = malloc(fru_len);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, fru_len);
/* read in the full fru */
if (read_fru_area(intf, fru, id, offset, fru_len, fru_data) < 0) {
free_n(&fru_data);
return;
}
/*
* skip first three bytes which specify
* fru area version, fru area length
* and fru board language
*/
i = 3;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Manufacturer : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Name : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Part Number : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Version : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Serial : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Asset Tag : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0 && verbose > 0) {
printf(" Product FRU ID : %s\n", fru_area);
}
free_n(&fru_area);
}
/* read any extra fields */
while ((i < fru_len) && (fru_data[i] != FRU_END_OF_FIELDS)) {
int j = i;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Extra : %s\n", fru_area);
}
free_n(&fru_area);
}
if (i == j)
break;
}
free_n(&fru_data);
}
/* fru_area_print_multirec - Print FRU Multi Record Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_multirec(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
uint8_t * fru_data;
struct fru_multirec_header * h;
struct fru_multirec_powersupply * ps;
struct fru_multirec_dcoutput * dc;
struct fru_multirec_dcload * dl;
uint16_t peak_capacity;
uint8_t peak_hold_up_time;
uint32_t last_off;
last_off = offset;
fru_data = malloc(FRU_MULTIREC_CHUNK_SIZE);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, FRU_MULTIREC_CHUNK_SIZE);
h = (struct fru_multirec_header *) (fru_data);
do {
if (read_fru_area(intf, fru, id, last_off, sizeof(*h), fru_data) < 0) {
break;
}
if (h->len && read_fru_area(intf, fru, id,
last_off + sizeof(*h), h->len, fru_data + sizeof(*h)) < 0) {
break;
}
last_off += h->len + sizeof(*h);
switch (h->type) {
case FRU_RECORD_TYPE_POWER_SUPPLY_INFORMATION:
ps = (struct fru_multirec_powersupply *)
(fru_data + sizeof(struct fru_multirec_header));
#if WORDS_BIGENDIAN
ps->capacity = BSWAP_16(ps->capacity);
ps->peak_va = BSWAP_16(ps->peak_va);
ps->lowend_input1 = BSWAP_16(ps->lowend_input1);
ps->highend_input1 = BSWAP_16(ps->highend_input1);
ps->lowend_input2 = BSWAP_16(ps->lowend_input2);
ps->highend_input2 = BSWAP_16(ps->highend_input2);
ps->combined_capacity = BSWAP_16(ps->combined_capacity);
ps->peak_cap_ht = BSWAP_16(ps->peak_cap_ht);
#endif
peak_hold_up_time = (ps->peak_cap_ht & 0xf000) >> 12;
peak_capacity = ps->peak_cap_ht & 0x0fff;
printf (" Power Supply Record\n");
printf (" Capacity : %d W\n",
ps->capacity);
printf (" Peak VA : %d VA\n",
ps->peak_va);
printf (" Inrush Current : %d A\n",
ps->inrush_current);
printf (" Inrush Interval : %d ms\n",
ps->inrush_interval);
printf (" Input Voltage Range 1 : %d-%d V\n",
ps->lowend_input1 / 100, ps->highend_input1 / 100);
printf (" Input Voltage Range 2 : %d-%d V\n",
ps->lowend_input2 / 100, ps->highend_input2 / 100);
printf (" Input Frequency Range : %d-%d Hz\n",
ps->lowend_freq, ps->highend_freq);
printf (" A/C Dropout Tolerance : %d ms\n",
ps->dropout_tolerance);
printf (" Flags : %s%s%s%s%s\n",
ps->predictive_fail ? "'Predictive fail' " : "",
ps->pfc ? "'Power factor correction' " : "",
ps->autoswitch ? "'Autoswitch voltage' " : "",
ps->hotswap ? "'Hot swap' " : "",
ps->predictive_fail ? ps->rps_threshold ?
ps->tach ? "'Two pulses per rotation'" : "'One pulse per rotation'" :
ps->tach ? "'Failure on pin de-assertion'" : "'Failure on pin assertion'" : "");
printf (" Peak capacity : %d W\n",
peak_capacity);
printf (" Peak capacity holdup : %d s\n",
peak_hold_up_time);
if (ps->combined_capacity == 0)
printf (" Combined capacity : not specified\n");
else
printf (" Combined capacity : %d W (%s and %s)\n",
ps->combined_capacity,
combined_voltage_desc [ps->combined_voltage1],
combined_voltage_desc [ps->combined_voltage2]);
if (ps->predictive_fail)
printf (" Fan lower threshold : %d RPS\n",
ps->rps_threshold);
break;
case FRU_RECORD_TYPE_DC_OUTPUT:
dc = (struct fru_multirec_dcoutput *)
(fru_data + sizeof(struct fru_multirec_header));
#if WORDS_BIGENDIAN
dc->nominal_voltage = BSWAP_16(dc->nominal_voltage);
dc->max_neg_dev = BSWAP_16(dc->max_neg_dev);
dc->max_pos_dev = BSWAP_16(dc->max_pos_dev);
dc->ripple_and_noise = BSWAP_16(dc->ripple_and_noise);
dc->min_current = BSWAP_16(dc->min_current);
dc->max_current = BSWAP_16(dc->max_current);
#endif
printf (" DC Output Record\n");
printf (" Output Number : %d\n",
dc->output_number);
printf (" Standby power : %s\n",
dc->standby ? "Yes" : "No");
printf (" Nominal voltage : %.2f V\n",
(double) dc->nominal_voltage / 100);
printf (" Max negative deviation : %.2f V\n",
(double) dc->max_neg_dev / 100);
printf (" Max positive deviation : %.2f V\n",
(double) dc->max_pos_dev / 100);
printf (" Ripple and noise pk-pk : %d mV\n",
dc->ripple_and_noise);
printf (" Minimum current draw : %.3f A\n",
(double) dc->min_current / 1000);
printf (" Maximum current draw : %.3f A\n",
(double) dc->max_current / 1000);
break;
case FRU_RECORD_TYPE_DC_LOAD:
dl = (struct fru_multirec_dcload *)
(fru_data + sizeof(struct fru_multirec_header));
#if WORDS_BIGENDIAN
dl->nominal_voltage = BSWAP_16(dl->nominal_voltage);
dl->min_voltage = BSWAP_16(dl->min_voltage);
dl->max_voltage = BSWAP_16(dl->max_voltage);
dl->ripple_and_noise = BSWAP_16(dl->ripple_and_noise);
dl->min_current = BSWAP_16(dl->min_current);
dl->max_current = BSWAP_16(dl->max_current);
#endif
printf (" DC Load Record\n");
printf (" Output Number : %d\n",
dl->output_number);
printf (" Nominal voltage : %.2f V\n",
(double) dl->nominal_voltage / 100);
printf (" Min voltage allowed : %.2f V\n",
(double) dl->min_voltage / 100);
printf (" Max voltage allowed : %.2f V\n",
(double) dl->max_voltage / 100);
printf (" Ripple and noise pk-pk : %d mV\n",
dl->ripple_and_noise);
printf (" Minimum current load : %.3f A\n",
(double) dl->min_current / 1000);
printf (" Maximum current load : %.3f A\n",
(double) dl->max_current / 1000);
break;
case FRU_RECORD_TYPE_OEM_EXTENSION:
{
struct fru_multirec_oem_header *oh=(struct fru_multirec_oem_header *)
&fru_data[sizeof(struct fru_multirec_header)];
uint32_t iana = oh->mfg_id[0] | oh->mfg_id[1]<<8 | oh->mfg_id[2]<<16;
/* Now makes sure this is really PICMG record */
if( iana == IPMI_OEM_PICMG ){
printf(" PICMG Extension Record\n");
ipmi_fru_picmg_ext_print(fru_data,
sizeof(struct fru_multirec_header),
h->len);
}
/* FIXME: Add OEM record support here */
else{
printf(" OEM (%s) Record\n", val2str( iana, ipmi_oem_info));
}
}
break;
}
} while (!(h->format & 0x80));
lprintf(LOG_DEBUG ,"Multi-Record area ends at: %i (%xh)", last_off, last_off);
free_n(&fru_data);
}
/* ipmi_fru_query_new_value - Query new values to replace original FRU content
*
* @data: FRU data
* @offset: offset of the bytes to be modified in data
* @len: size of the modified data
*
* returns : TRUE if data changed
* returns : FALSE if data not changed
*/
static
bool
ipmi_fru_query_new_value(uint8_t *data,int offset, size_t len)
{
bool status = false;
int ret;
char answer;
printf("Would you like to change this value <y/n> ? ");
ret = scanf("%c", &answer);
if (ret != 1) {
return false;
}
if( answer == 'y' || answer == 'Y' ){
int i;
unsigned int *holder;
holder = malloc(len);
printf(
"Enter hex values for each of the %d entries (lsb first), "
"hit <enter> between entries\n", (int)len);
/* I can't assign scanf' %x into a single char */
for( i=0;i<len;i++ ){
ret = scanf("%x", holder+i);
if (ret != 1) {
free_n(&holder);
return false;
}
}
for( i=0;i<len;i++ ){
data[offset++] = (unsigned char) *(holder+i);
}
/* &data[offset++] */
free_n(&holder);
status = true;
}
else{
printf("Entered %c\n",answer);
}
return status;
}
/* ipmi_fru_oemkontron_edit -
* Query new values to replace original FRU content
* This is a generic enough to support any type of 'OEM' record
* because the user supplies 'IANA number' , 'record Id' and 'record' version'
*
* However, the parser must have 'apriori' knowledge of the record format
* The currently supported record is :
*
* IANA : 15000 (Kontron)
* RECORD ID : 3
* RECORD VERSION: 0 (or 1)
*
* I would have like to put that stuff in an OEM specific file, but apart for
* the record format information, all commands are really standard 'FRU' command
*
*
* @data: FRU data
* @offset: start of the current multi record (start of header)
* @len: len of the current record (excluding header)
* @h: pointer to record header
* @oh: pointer to OEM /PICMG header
*
* returns: TRUE if data changed
* returns: FALSE if data not changed
*/
#define OEM_KONTRON_INFORMATION_RECORD 3
#define EDIT_OEM_KONTRON_COMPLETE_ARG_COUNT 12
#define GET_OEM_KONTRON_COMPLETE_ARG_COUNT 5
/*
./src/ipmitool fru edit 0
oem 15000 3 0 name instance FIELD1 FIELD2 FIELD3 crc32
*/
#define OEM_KONTRON_SUBCOMMAND_ARG_POS 2
#define OEM_KONTRON_IANA_ARG_POS 3
#define OEM_KONTRON_RECORDID_ARG_POS 4
#define OEM_KONTRON_FORMAT_ARG_POS 5
#define OEM_KONTRON_NAME_ARG_POS 6
#define OEM_KONTRON_INSTANCE_ARG_POS 7
#define OEM_KONTRON_VERSION_ARG_POS 8
#define OEM_KONTRON_BUILDDATE_ARG_POS 9
#define OEM_KONTRON_UPDATEDATE_ARG_POS 10
#define OEM_KONTRON_CRC32_ARG_POS 11
#define OEM_KONTRON_FIELD_SIZE 8
#define OEM_KONTRON_VERSION_FIELD_SIZE 10
#ifdef HAVE_PRAGMA_PACK
#pragma pack(1)
#endif
typedef struct OemKontronInformationRecordV0{
uint8_t field1TypeLength;
uint8_t field1[OEM_KONTRON_FIELD_SIZE];
uint8_t field2TypeLength;
uint8_t field2[OEM_KONTRON_FIELD_SIZE];
uint8_t field3TypeLength;
uint8_t field3[OEM_KONTRON_FIELD_SIZE];
uint8_t crcTypeLength;
uint8_t crc32[OEM_KONTRON_FIELD_SIZE];
}tOemKontronInformationRecordV0;
#ifdef HAVE_PRAGMA_PACK
#pragma pack(0)
#endif
#ifdef HAVE_PRAGMA_PACK
#pragma pack(1)
#endif
typedef struct OemKontronInformationRecordV1{
uint8_t field1TypeLength;
uint8_t field1[OEM_KONTRON_VERSION_FIELD_SIZE];
uint8_t field2TypeLength;
uint8_t field2[OEM_KONTRON_FIELD_SIZE];
uint8_t field3TypeLength;
uint8_t field3[OEM_KONTRON_FIELD_SIZE];
uint8_t crcTypeLength;
uint8_t crc32[OEM_KONTRON_FIELD_SIZE];
}tOemKontronInformationRecordV1;
#ifdef HAVE_PRAGMA_PACK
#pragma pack(0)
#endif
/*
./src/ipmitool fru get 0 oem iana 3
*/
static void ipmi_fru_oemkontron_get(int argc,
char ** argv,
uint8_t * fru_data,
int off,
struct fru_multirec_oem_header *oh)
{
static bool badParams = false;
int start = off;
int offset = start;
offset += sizeof(struct fru_multirec_oem_header);
if(!badParams){
/* the 'OEM' field is already checked in caller */
if( argc > OEM_KONTRON_SUBCOMMAND_ARG_POS ){
if(strncmp("oem", argv[OEM_KONTRON_SUBCOMMAND_ARG_POS],3)){
printf("usage: fru get <id> <oem>\n");
badParams = true;
return;
}
}
if( argc<GET_OEM_KONTRON_COMPLETE_ARG_COUNT ){
printf("usage: oem <iana> <recordid>\n");
printf("usage: oem 15000 3\n");
badParams = true;
return;
}
}
if (badParams) {
return;
}
if (oh->record_id != OEM_KONTRON_INFORMATION_RECORD) {
return;
}
uint8_t version;
printf("Kontron OEM Information Record\n");
version = oh->record_version;
uint8_t blockCount;
uint8_t blockIndex = 0;
uint8_t instance = 0;
if (str2uchar(argv[OEM_KONTRON_INSTANCE_ARG_POS], &instance) != 0) {
lprintf(LOG_ERR,
"Instance argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_INSTANCE_ARG_POS]);
badParams = true;
return;
}
blockCount = fru_data[offset++];
for (blockIndex = 0; blockIndex < blockCount; blockIndex++) {
void *pRecordData;
uint8_t nameLen;
nameLen = (fru_data[offset++] &= 0x3F);
printf(" Name: %*.*s\n", nameLen, nameLen,
(const char *)(fru_data + offset));
offset += nameLen;
pRecordData = &fru_data[offset];
printf(" Record Version: %d\n", version);
if (version == 0) {
printf(" Version: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->field1);
printf(" Build Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->field2);
printf(" Update Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->field3);
printf(" Checksum: %*.*s\n\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->crc32);
offset += sizeof(tOemKontronInformationRecordV0);
offset++;
} else if (version == 1) {
printf(" Version: %*.*s\n",
OEM_KONTRON_VERSION_FIELD_SIZE,
OEM_KONTRON_VERSION_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->field1);
printf(" Build Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->field2);
printf(" Update Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->field3);
printf(" Checksum: %*.*s\n\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->crc32);
offset += sizeof(tOemKontronInformationRecordV1);
offset++;
} else {
printf(" Unsupported version %d\n", version);
}
}
}
static
bool
ipmi_fru_oemkontron_edit( int argc, char ** argv,uint8_t * fru_data,
int off,int len,
struct fru_multirec_header *h,
struct fru_multirec_oem_header *oh)
{
static bool badParams=false;
bool hasChanged = false;
int start = off;
int offset = start;
int length = len;
int i;
uint8_t record_id = 0;
offset += sizeof(struct fru_multirec_oem_header);
if(!badParams){
/* the 'OEM' field is already checked in caller */
if( argc > OEM_KONTRON_SUBCOMMAND_ARG_POS ){
if(strncmp("oem", argv[OEM_KONTRON_SUBCOMMAND_ARG_POS],3)){
printf("usage: fru edit <id> <oem> <args...>\n");
badParams = true;
return hasChanged;
}
}
if( argc<EDIT_OEM_KONTRON_COMPLETE_ARG_COUNT ){
printf("usage: oem <iana> <recordid> <format> <args...>\n");
printf("usage: oem 15000 3 0 <name> <instance> <field1>"\
" <field2> <field3> <crc32>\n");
badParams = true;
return hasChanged;
}
if (str2uchar(argv[OEM_KONTRON_RECORDID_ARG_POS], &record_id) != 0) {
lprintf(LOG_ERR,
"Record ID argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_RECORDID_ARG_POS]);
badParams = true;
return hasChanged;
}
if (record_id == OEM_KONTRON_INFORMATION_RECORD) {
for(i=OEM_KONTRON_VERSION_ARG_POS;i<=OEM_KONTRON_CRC32_ARG_POS;i++){
if( (strlen(argv[i]) != OEM_KONTRON_FIELD_SIZE) &&
(strlen(argv[i]) != OEM_KONTRON_VERSION_FIELD_SIZE)) {
printf("error: version fields must have %d characters\n",
OEM_KONTRON_FIELD_SIZE);
badParams = true;
return hasChanged;
}
}
}
}
if(!badParams){
if(oh->record_id == OEM_KONTRON_INFORMATION_RECORD ) {
uint8_t formatVersion = 0;
uint8_t version;
if (str2uchar(argv[OEM_KONTRON_FORMAT_ARG_POS], &formatVersion) != 0) {
lprintf(LOG_ERR,
"Format argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_FORMAT_ARG_POS]);
badParams = true;
return hasChanged;
}
printf(" Kontron OEM Information Record\n");
version = oh->record_version;
if( version == formatVersion ){
uint8_t blockCount;
uint8_t blockIndex=0;
uint8_t matchInstance = 0;
uint8_t instance = 0;
if (str2uchar(argv[OEM_KONTRON_INSTANCE_ARG_POS], &instance) != 0) {
lprintf(LOG_ERR,
"Instance argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_INSTANCE_ARG_POS]);
badParams = true;
return hasChanged;
}
blockCount = fru_data[offset++];
printf(" blockCount: %d\n",blockCount);
for(blockIndex=0;blockIndex<blockCount;blockIndex++){
void * pRecordData;
uint8_t nameLen;
nameLen = ( fru_data[offset++] & 0x3F );
if( version == 0 || version == 1 )
{
if(!strncmp((char *)argv[OEM_KONTRON_NAME_ARG_POS],
(const char *)(fru_data+offset),nameLen)&& (matchInstance == instance)){
printf ("Found : %s\n",argv[OEM_KONTRON_NAME_ARG_POS]);
offset+=nameLen;
pRecordData = &fru_data[offset];
if( version == 0 )
{
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->field1 ,
argv[OEM_KONTRON_VERSION_ARG_POS] ,
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->field2 ,
argv[OEM_KONTRON_BUILDDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->field3 ,
argv[OEM_KONTRON_UPDATEDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->crc32 ,
argv[OEM_KONTRON_CRC32_ARG_POS] ,
OEM_KONTRON_FIELD_SIZE);
}
else
{
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->field1 ,
argv[OEM_KONTRON_VERSION_ARG_POS] ,
OEM_KONTRON_VERSION_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->field2 ,
argv[OEM_KONTRON_BUILDDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->field3 ,
argv[OEM_KONTRON_UPDATEDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->crc32 ,
argv[OEM_KONTRON_CRC32_ARG_POS] ,
OEM_KONTRON_FIELD_SIZE);
}
matchInstance++;
hasChanged = true;
}
else if(!strncmp((char *)argv[OEM_KONTRON_NAME_ARG_POS],
(const char *)(fru_data+offset), nameLen)){
printf ("Skipped : %s [instance %d]\n",argv[OEM_KONTRON_NAME_ARG_POS],
(unsigned int)matchInstance);
matchInstance++;
offset+=nameLen;
}
else {
offset+=nameLen;
}
if( version == 0 )
{
offset+= sizeof(tOemKontronInformationRecordV0);
}
else
{
offset+= sizeof(tOemKontronInformationRecordV1);
}
offset++;
}
else
{
printf (" Unsupported version %d\n",version);
}
}
}
else{
printf(" Version: %d\n",version);
}
}
if( hasChanged ){
uint8_t record_checksum =0;
uint8_t header_checksum =0;
int index;
lprintf(LOG_DEBUG,"Initial record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Initial header checksum : %x",h->header_checksum);
for(index=0;index<length;index++){
record_checksum+= fru_data[start+index];
}
/* Update Record checksum */
h->record_checksum = ~record_checksum + 1;
for(index=0;index<(sizeof(struct fru_multirec_header) -1);index++){
uint8_t data= *( (uint8_t *)h+ index);
header_checksum+=data;
}
/* Update header checksum */
h->header_checksum = ~header_checksum + 1;
lprintf(LOG_DEBUG,"Final record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Final header checksum : %x",h->header_checksum);
/* write back data */
}
}
return hasChanged;
}
/* ipmi_fru_picmg_ext_edit - Query new values to replace original FRU content
*
* @data: FRU data
* @offset: start of the current multi record (start of header)
* @len: len of the current record (excluding header)
* @h: pointer to record header
* @oh: pointer to OEM /PICMG header
*
* returns: TRUE if data changed
* returns: FALSE if data not changed
*/
static
bool
ipmi_fru_picmg_ext_edit(uint8_t * fru_data,
int off,int len,
struct fru_multirec_header *h,
struct fru_multirec_oem_header *oh)
{
bool hasChanged = false;
int start = off;
int offset = start;
int length = len;
offset += sizeof(struct fru_multirec_oem_header);
switch (oh->record_id)
{
case FRU_AMC_ACTIVATION:
printf(" FRU_AMC_ACTIVATION\n");
{
int index=offset;
uint16_t max_current;
max_current = fru_data[offset];
max_current |= fru_data[++offset]<<8;
printf(" Maximum Internal Current(@12V): %.2f A (0x%02x)\n",
(float)max_current / 10.0f, max_current);
if( ipmi_fru_query_new_value(fru_data,index,2) ){
max_current = fru_data[index];
max_current |= fru_data[++index]<<8;
printf(" New Maximum Internal Current(@12V): %.2f A (0x%02x)\n",
(float)max_current / 10.0f, max_current);
hasChanged = true;
}
printf(" Module Activation Readiness: %i sec.\n", fru_data[++offset]);
printf(" Descriptor Count: %i\n", fru_data[++offset]);
printf("\n");
for (++offset;
offset < (off + length);
offset += sizeof(struct fru_picmgext_activation_record)) {
struct fru_picmgext_activation_record * a =
(struct fru_picmgext_activation_record *) &fru_data[offset];
printf(" IPMB-Address: 0x%x\n", a->ibmb_addr);
printf(" Max. Module Current: %.2f A\n", (float)a->max_module_curr / 10.0f);
printf("\n");
}
}
break;
case FRU_AMC_CURRENT:
printf(" FRU_AMC_CURRENT\n");
{
int index=offset;
unsigned char current;
current = fru_data[index];
printf(" Current draw(@12V): %.2f A (0x%02x)\n",
(float)current / 10.0f, current);
if( ipmi_fru_query_new_value(fru_data, index, 1) ){
current = fru_data[index];
printf(" New Current draw(@12V): %.2f A (0x%02x)\n",
(float)current / 10.0f, current);
hasChanged = true;
}
}
break;
}
if( hasChanged ){
uint8_t record_checksum =0;
uint8_t header_checksum =0;
int index;
lprintf(LOG_DEBUG,"Initial record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Initial header checksum : %x",h->header_checksum);
for(index=0;index<length;index++){
record_checksum+= fru_data[start+index];
}
/* Update Record checksum */
h->record_checksum = ~record_checksum + 1;
for(index=0;index<(sizeof(struct fru_multirec_header) -1);index++){
uint8_t data= *( (uint8_t *)h+ index);
header_checksum+=data;
}
/* Update header checksum */
h->header_checksum = ~header_checksum + 1;
lprintf(LOG_DEBUG,"Final record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Final header checksum : %x",h->header_checksum);
/* write back data */
}
return hasChanged;
}
/* ipmi_fru_picmg_ext_print - prints OEM fru record (PICMG)
*
* @fru_data: FRU data
* @offset: offset of the bytes to be modified in data
* @length: size of the record
*
* returns : n/a
*/
static void ipmi_fru_picmg_ext_print(uint8_t * fru_data, int off, int length)
{
struct fru_multirec_oem_header *h;
int guid_count;
int offset = off;
int start_offset = off;
int i;
h = (struct fru_multirec_oem_header *) &fru_data[offset];
offset += sizeof(struct fru_multirec_oem_header);
switch (h->record_id)
{
case FRU_PICMG_BACKPLANE_P2P:
{
uint8_t index;
unsigned int data;
struct fru_picmgext_slot_desc *slot_d;
slot_d =
(struct fru_picmgext_slot_desc*)&fru_data[offset];
offset += sizeof(struct fru_picmgext_slot_desc);
printf(" FRU_PICMG_BACKPLANE_P2P\n");
while (offset <= (start_offset+length)) {
printf("\n");
printf(" Channel Type: ");
switch (slot_d->chan_type)
{
case 0x00:
case 0x07:
printf("PICMG 2.9\n");
break;
case 0x08:
printf("Single Port Fabric IF\n");
break;
case 0x09:
printf("Double Port Fabric IF\n");
break;
case 0x0a:
printf("Full Channel Fabric IF\n");
break;
case 0x0b:
printf("Base IF\n");
break;
case 0x0c:
printf("Update Channel IF\n");
break;
case 0x0d:
printf("ShMC Cross Connect\n");
break;
default:
printf("Unknown IF (0x%x)\n",
slot_d->chan_type);
break;
}
printf(" Slot Addr. : %02x\n",
slot_d->slot_addr );
printf(" Channel Count: %i\n",
slot_d->chn_count);
for (index = 0;
index < (slot_d->chn_count);
index++) {
struct fru_picmgext_chn_desc *d;
data = (fru_data[offset+0]) |
(fru_data[offset+1] << 8) |
(fru_data[offset+2] << 16);
d = (struct fru_picmgext_chn_desc *)&data;
if (verbose) {
printf( " "
"Chn: %02x -> "
"Chn: %02x in "
"Slot: %02x\n",
d->local_chn,
d->remote_chn,
d->remote_slot);
}
offset += FRU_PICMGEXT_CHN_DESC_RECORD_SIZE;
}
slot_d = (struct fru_picmgext_slot_desc*)&fru_data[offset];
offset += sizeof(struct fru_picmgext_slot_desc);
}
}
break;
case FRU_PICMG_ADDRESS_TABLE:
{
unsigned int hwaddr;
unsigned int sitetype;
unsigned int sitenum;
unsigned int entries;
unsigned int i;
char *picmg_site_type_strings[] = {
"AdvancedTCA Board",
"Power Entry",
"Shelf FRU Information",
"Dedicated ShMC",
"Fan Tray",
"Fan Filter Tray",
"Alarm",
"AdvancedMC Module",
"PMC",
"Rear Transition Module"};
printf(" FRU_PICMG_ADDRESS_TABLE\n");
printf(" Type/Len: 0x%02x\n", fru_data[offset++]);
printf(" Shelf Addr: ");
for (i=0;i<20;i++) {
printf("0x%02x ", fru_data[offset++]);
}
printf("\n");
entries = fru_data[offset++];
printf(" Addr Table Entries: 0x%02x\n", entries);
for (i=0; i<entries; i++) {
hwaddr = fru_data[offset];
sitenum = fru_data[offset + 1];
sitetype = fru_data[offset + 2];
printf(
" HWAddr: 0x%02x (0x%02x) SiteNum: %d SiteType: 0x%02x %s\n",
hwaddr, hwaddr * 2,
sitenum, sitetype,
(sitetype < 0xa) ?
picmg_site_type_strings[sitetype] :
"Reserved");
offset += 3;
}
}
break;
case FRU_PICMG_SHELF_POWER_DIST:
{
unsigned int entries;
unsigned int feeds;
unsigned int hwaddr;
unsigned int i;
unsigned int id;
unsigned int j;
unsigned int maxext;
unsigned int maxint;
unsigned int minexp;
printf(" FRU_PICMG_SHELF_POWER_DIST\n");
feeds = fru_data[offset++];
printf(" Number of Power Feeds: 0x%02x\n",
feeds);
for (i=0; i<feeds; i++) {
printf(" Feed %d:\n", i);
maxext = fru_data[offset] |
(fru_data[offset+1] << 8);
offset += 2;
maxint = fru_data[offset] |
(fru_data[offset+1] << 8);
offset += 2;
minexp = fru_data[offset];
offset += 1;
entries = fru_data[offset];
offset += 1;
printf(
" Max External Current: %d.%d Amps (0x%04x)\n",
maxext / 10, maxext % 10, maxext);
if (maxint < 0xffff) {
printf(
" Max Internal Current: %d.%d Amps (0x%04x)\n",
maxint / 10, maxint % 10,
maxint);
} else {
printf(
" Max Internal Current: Not Specified\n");
}
if (minexp >= 0x48 && minexp <= 0x90) {
printf(
" Min Expected Voltage: -%02d.%dV\n",
minexp / 2, (minexp % 2) * 5);
} else {
printf(
" Min Expected Voltage: -%dV (actual invalid value 0x%x)\n",
36, minexp);
}
for (j=0; j < entries; j++) {
hwaddr = fru_data[offset++];
id = fru_data[offset++];
printf(
" FRU HW Addr: 0x%02x (0x%02x)",
hwaddr, hwaddr * 2);
printf(
" FRU ID: 0x%02x\n",
id);
}
}
}
break;
case FRU_PICMG_SHELF_ACTIVATION:
{
unsigned int i;
unsigned int count = 0;
printf(" FRU_PICMG_SHELF_ACTIVATION\n");
printf(
" Allowance for FRU Act Readiness: 0x%02x\n",
fru_data[offset++]);
count = fru_data[offset++];
printf(
" FRU activation and Power Desc Cnt: 0x%02x\n",
count);
for (i=0; i<count; i++) {
printf(" HW Addr: 0x%02x ",
fru_data[offset++]);
printf(" FRU ID: 0x%02x ",
fru_data[offset++]);
printf(" Max FRU Power: 0x%04x ",
fru_data[offset+0] |
(fru_data[offset+1]<<8));
offset += 2;
printf(" Config: 0x%02x \n",
fru_data[offset++]);
}
}
break;
case FRU_PICMG_SHMC_IP_CONN:
printf(" FRU_PICMG_SHMC_IP_CONN\n");
break;
case FRU_PICMG_BOARD_P2P:
printf(" FRU_PICMG_BOARD_P2P\n");
guid_count = fru_data[offset++];
printf(" GUID count: %2d\n", guid_count);
for (i = 0 ; i < guid_count; i++ ) {
int j;
printf(" GUID [%2d]: 0x", i);
for (j=0; j < sizeof(struct fru_picmgext_guid);
j++) {
printf("%02x", fru_data[offset+j]);
}
printf("\n");
offset += sizeof(struct fru_picmgext_guid);
}
printf("\n");
for (; offset < off + length;
offset += sizeof(struct fru_picmgext_link_desc)) {
/* to solve little endian /big endian problem */
struct fru_picmgext_link_desc *d;
unsigned int data = (fru_data[offset+0]) |
(fru_data[offset+1] << 8) |
(fru_data[offset+2] << 16) |
(fru_data[offset+3] << 24);
d = (struct fru_picmgext_link_desc *) &data;
printf(" Link Grouping ID: 0x%02x\n",
d->grouping);
printf(" Link Type Extension: 0x%02x - ",
d->ext);
if (d->type == FRU_PICMGEXT_LINK_TYPE_BASE) {
switch (d->ext) {
case 0:
printf("10/100/1000BASE-T Link (four-pair)\n");
break;
case 1:
printf("ShMC Cross-connect (two-pair)\n");
break;
default:
printf("Unknown\n");
break;
}
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET) {
switch (d->ext) {
case 0:
printf("1000Base-BX\n");
break;
case 1:
printf("10GBase-BX4 [XAUI]\n");
break;
case 2:
printf("FC-PI\n");
break;
case 3:
printf("1000Base-KX\n");
break;
case 4:
printf("10GBase-KX4\n");
break;
default:
printf("Unknown\n");
break;
}
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET_10GBD) {
switch (d->ext) {
case 0:
printf("10GBase-KR\n");
break;
case 1:
printf("40GBase-KR4\n");
break;
default:
printf("Unknown\n");
break;
}
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_INFINIBAND) {
printf("Unknown\n");
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_STAR) {
printf("Unknown\n");
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_PCIE) {
printf("Unknown\n");
} else {
printf("Unknown\n");
}
printf(" Link Type: 0x%02x - ",
d->type);
switch (d->type) {
case FRU_PICMGEXT_LINK_TYPE_BASE:
printf("PICMG 3.0 Base Interface 10/100/1000\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET:
printf("PICMG 3.1 Ethernet Fabric Interface\n");
printf(" Base signaling Link Class\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_INFINIBAND:
printf("PICMG 3.2 Infiniband Fabric Interface\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_STAR:
printf("PICMG 3.3 Star Fabric Interface\n");
break;
case FRU_PICMGEXT_LINK_TYPE_PCIE:
printf("PICMG 3.4 PCI Express Fabric Interface\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET_10GBD:
printf("PICMG 3.1 Ethernet Fabric Interface\n");
printf(" 10.3125Gbd signaling Link Class\n");
break;
default:
if (d->type == 0 || d->type == 0xff) {
printf("Reserved\n");
} else if (d->type >= 0x06 && d->type <= 0xef) {
printf("Reserved\n");
} else if (d->type >= 0xf0 && d->type <= 0xfe) {
printf("OEM GUID Definition\n");
} else {
printf("Invalid\n");
}
break;
}
printf(" Link Designator: \n");
printf(" Port Flag: 0x%02x\n",
d->desig_port);
printf(" Interface: 0x%02x - ",
d->desig_if);
switch (d->desig_if) {
case FRU_PICMGEXT_DESIGN_IF_BASE:
printf("Base Interface\n");
break;
case FRU_PICMGEXT_DESIGN_IF_FABRIC:
printf("Fabric Interface\n");
break;
case FRU_PICMGEXT_DESIGN_IF_UPDATE_CHANNEL:
printf("Update Channel\n");
break;
case FRU_PICMGEXT_DESIGN_IF_RESERVED:
printf("Reserved\n");
break;
default:
printf("Invalid");
break;
}
printf(" Channel Number: 0x%02x\n",
d->desig_channel);
printf("\n");
}
break;
case FRU_AMC_CURRENT:
{
unsigned char current;
printf(" FRU_AMC_CURRENT\n");
current = fru_data[offset];
printf(" Current draw(@12V): %.2f A [ %.2f Watt ]\n",
(float)current / 10.0f,
(float)current / 10.0f * 12.0f);
printf("\n");
}
break;
case FRU_AMC_ACTIVATION:
printf(" FRU_AMC_ACTIVATION\n");
{
uint16_t max_current;
max_current = fru_data[offset];
max_current |= fru_data[++offset]<<8;
printf(" Maximum Internal Current(@12V): %.2f A [ %.2f Watt ]\n",
(float)max_current / 10.0f,
(float)max_current / 10.0f * 12.0f);
printf(" Module Activation Readiness: %i sec.\n", fru_data[++offset]);
printf(" Descriptor Count: %i\n", fru_data[++offset]);
printf("\n");
for(++offset; offset < off + length;
offset += sizeof(struct fru_picmgext_activation_record))
{
struct fru_picmgext_activation_record *a;
a = (struct fru_picmgext_activation_record *)&fru_data[offset];
printf(" IPMB-Address: 0x%x\n",
a->ibmb_addr);
printf(" Max. Module Current: %.2f A\n",
(float)a->max_module_curr / 10.0f);
printf("\n");
}
}
break;
case FRU_AMC_CARRIER_P2P:
{
uint16_t index;
printf(" FRU_CARRIER_P2P\n");
for(; offset < off + length; ) {
struct fru_picmgext_carrier_p2p_record * h =
(struct fru_picmgext_carrier_p2p_record *)&fru_data[offset];
printf("\n");
printf(" Resource ID: %i",
(h->resource_id & 0x07));
printf(" Type: ");
if ((h->resource_id>>7) == 1) {
printf("AMC\n");
} else {
printf("Local\n");
}
printf(" Descriptor Count: %i\n",
h->p2p_count);
offset += sizeof(struct fru_picmgext_carrier_p2p_record);
for (index = 0; index < h->p2p_count; index++) {
/* to solve little endian /big endian problem */
unsigned char data[3];
struct fru_picmgext_carrier_p2p_descriptor * desc;
# ifndef WORDS_BIGENDIAN
data[0] = fru_data[offset+0];
data[1] = fru_data[offset+1];
data[2] = fru_data[offset+2];
# else
data[0] = fru_data[offset+2];
data[1] = fru_data[offset+1];
data[2] = fru_data[offset+0];
# endif
desc = (struct fru_picmgext_carrier_p2p_descriptor*)&data;
printf(" Port: %02d\t-> Remote Port: %02d\t",
desc->local_port, desc->remote_port);
if ((desc->remote_resource_id >> 7) == 1) {
printf("[ AMC ID: %02d ]\n",
desc->remote_resource_id & 0x0F);
} else {
printf("[ local ID: %02d ]\n",
desc->remote_resource_id & 0x0F);
}
offset += sizeof(struct fru_picmgext_carrier_p2p_descriptor);
}
}
}
break;
case FRU_AMC_P2P:
{
unsigned int index;
unsigned char channel_count;
struct fru_picmgext_amc_p2p_record * h;
printf(" FRU_AMC_P2P\n");
guid_count = fru_data[offset];
printf(" GUID count: %2d\n", guid_count);
for (i = 0 ; i < guid_count; i++) {
int j;
printf(" GUID %2d: ", i);
for (j=0; j < sizeof(struct fru_picmgext_guid);
j++) {
printf("%02x", fru_data[offset+j]);
offset += sizeof(struct fru_picmgext_guid);
printf("\n");
}
h = (struct fru_picmgext_amc_p2p_record *)&fru_data[++offset];
printf(" %s",
(h->record_type ?
"AMC Module:" : "On-Carrier Device"));
printf(" Resource ID: %i\n", h->resource_id);
offset += sizeof(struct fru_picmgext_amc_p2p_record);
channel_count = fru_data[offset++];
printf(" Descriptor Count: %i\n",
channel_count);
for (index = 0; index < channel_count; index++) {
unsigned int data;
struct fru_picmgext_amc_channel_desc_record *d;
/* pack the data in little endian format.
* Stupid intel...
*/
data = fru_data[offset] |
(fru_data[offset + 1] << 8) |
(fru_data[offset + 2] << 16);
d = (struct fru_picmgext_amc_channel_desc_record *)&data;
printf(" Lane 0 Port: %i\n",
d->lane0port);
printf(" Lane 1 Port: %i\n",
d->lane1port);
printf(" Lane 2 Port: %i\n",
d->lane2port);
printf(" Lane 3 Port: %i\n\n",
d->lane3port);
offset += FRU_PICMGEXT_AMC_CHANNEL_DESC_RECORD_SIZE;
}
for (; offset < off + length;) {
unsigned int data[2];
struct fru_picmgext_amc_link_desc_record *l;
l = (struct fru_picmgext_amc_link_desc_record *)&data[0];
data[0] = fru_data[offset] |
(fru_data[offset + 1] << 8) |
(fru_data[offset + 2] << 16) |
(fru_data[offset + 3] << 24);
data[1] = fru_data[offset + 4];
printf( " Link Designator: Channel ID: %i\n"
" Port Flag 0: %s%s%s%s\n",
l->channel_id,
(l->port_flag_0)?"o":"-",
(l->port_flag_1)?"o":"-",
(l->port_flag_2)?"o":"-",
(l->port_flag_3)?"o":"-" );
switch (l->type) {
case FRU_PICMGEXT_AMC_LINK_TYPE_PCIE:
/* AMC.1 */
printf( " Link Type: %02x - "
"AMC.1 PCI Express\n", l->type);
switch (l->type_ext) {
case AMC_LINK_TYPE_EXT_PCIE_G1_NSSC:
printf( " Link Type Ext: %i - "
" Gen 1 capable - non SSC\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_PCIE_G1_SSC:
printf( " Link Type Ext: %i - "
" Gen 1 capable - SSC\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_PCIE_G2_NSSC:
printf( " Link Type Ext: %i - "
" Gen 2 capable - non SSC\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_PCIE_G2_SSC:
printf( " Link Type Ext: %i - "
" Gen 2 capable - SSC\n",
l->type_ext);
break;
default:
printf( " Link Type Ext: %i - "
" Invalid\n",
l->type_ext);
break;
}
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_PCIE_AS1:
case FRU_PICMGEXT_AMC_LINK_TYPE_PCIE_AS2:
/* AMC.1 */
printf( " Link Type: %02x - "
"AMC.1 PCI Express Advanced Switching\n",
l->type);
printf(" Link Type Ext: %i\n",
l->type_ext);
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_ETHERNET:
/* AMC.2 */
printf( " Link Type: %02x - "
"AMC.2 Ethernet\n",
l->type);
switch (l->type_ext) {
case AMC_LINK_TYPE_EXT_ETH_1000_BX:
printf( " Link Type Ext: %i - "
" 1000Base-Bx (SerDES Gigabit) Ethernet Link\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_ETH_10G_XAUI:
printf( " Link Type Ext: %i - "
" 10Gbit XAUI Ethernet Link\n",
l->type_ext);
break;
default:
printf( " Link Type Ext: %i - "
" Invalid\n",
l->type_ext);
break;
}
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_STORAGE:
/* AMC.3 */
printf( " Link Type: %02x - "
"AMC.3 Storage\n",
l->type);
switch (l->type_ext) {
case AMC_LINK_TYPE_EXT_STORAGE_FC:
printf( " Link Type Ext: %i - "
" Fibre Channel\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_STORAGE_SATA:
printf( " Link Type Ext: %i - "
" Serial ATA\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_STORAGE_SAS:
printf( " Link Type Ext: %i - "
" Serial Attached SCSI\n",
l->type_ext);
break;
default:
printf( " Link Type Ext: %i - "
" Invalid\n",
l->type_ext);
break;
}
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_RAPIDIO:
/* AMC.4 */
printf( " Link Type: %02x - "
"AMC.4 Serial Rapid IO\n",
l->type);
printf(" Link Type Ext: %i\n",
l->type_ext);
break;
default:
printf( " Link Type: %02x - "
"reserved or OEM GUID",
l->type);
printf(" Link Type Ext: %i\n",
l->type_ext);
break;
}
printf(" Link group Id: %i\n",
l->group_id);
printf(" Link Asym Match: %i\n\n",
l->asym_match);
offset += FRU_PICMGEXT_AMC_LINK_DESC_RECORD_SIZE;
}
}
}
break;
case FRU_AMC_CARRIER_INFO:
{
unsigned char extVersion;
unsigned char siteCount;
printf(" FRU_CARRIER_INFO\n");
extVersion = fru_data[offset++];
siteCount = fru_data[offset++];
printf(" AMC.0 extension version: R%d.%d\n",
(extVersion >> 0)& 0x0F,
(extVersion >> 4)& 0x0F );
printf(" Carrier Sie Number Cnt: %d\n", siteCount);
for (i = 0 ; i < siteCount; i++ ){
printf(" Site ID: %i \n", fru_data[offset++]);
}
printf("\n");
}
break;
case FRU_PICMG_CLK_CARRIER_P2P:
{
unsigned char desc_count;
int i,j;
printf(" FRU_PICMG_CLK_CARRIER_P2P\n");
desc_count = fru_data[offset++];
for(i=0; i<desc_count; i++){
unsigned char resource_id;
unsigned char channel_count;
resource_id = fru_data[offset++];
channel_count = fru_data[offset++];
printf("\n");
printf(" Clock Resource ID: 0x%02x Type: ", resource_id);
if((resource_id & 0xC0)>>6 == 0) {printf("On-Carrier-Device\n");}
else if((resource_id & 0xC0)>>6 == 1) {printf("AMC slot\n");}
else if((resource_id & 0xC0)>>6 == 2) {printf("Backplane\n");}
else{ printf("reserved\n");}
printf(" Channel Count: 0x%02x\n", channel_count);
for(j=0; j<channel_count; j++){
unsigned char loc_channel, rem_channel, rem_resource;
loc_channel = fru_data[offset++];
rem_channel = fru_data[offset++];
rem_resource = fru_data[offset++];
printf(" CLK-ID: 0x%02x ->", loc_channel);
printf(" remote CLKID: 0x%02x ", rem_channel);
if((rem_resource & 0xC0)>>6 == 0) {printf("[ Carrier-Dev");}
else if((rem_resource & 0xC0)>>6 == 1) {printf("[ AMC slot ");}
else if((rem_resource & 0xC0)>>6 == 2) {printf("[ Backplane ");}
else{ printf("reserved ");}
printf(" 0x%02x ]\n", rem_resource&0xF);
}
}
printf("\n");
}
break;
case FRU_PICMG_CLK_CONFIG:
{
unsigned char resource_id, descr_count;
int i,j;
printf(" FRU_PICMG_CLK_CONFIG\n");
resource_id = fru_data[offset++];
descr_count = fru_data[offset++];
printf("\n");
printf(" Clock Resource ID: 0x%02x\n", resource_id);
printf(" Descr. Count: 0x%02x\n", descr_count);
for(i=0; i<descr_count; i++){
unsigned char channel_id, control;
unsigned char indirect_cnt, direct_cnt;
channel_id = fru_data[offset++];
control = fru_data[offset++];
printf(" CLK-ID: 0x%02x - ", channel_id);
printf("CTRL 0x%02x [ %12s ]\n",
control,
((control&0x1)==0)?"Carrier IPMC":"Application");
indirect_cnt = fru_data[offset++];
direct_cnt = fru_data[offset++];
printf(" Cnt: Indirect 0x%02x / Direct 0x%02x\n",
indirect_cnt,
direct_cnt);
/* indirect desc */
for(j=0; j<indirect_cnt; j++){
unsigned char feature;
unsigned char dep_chn_id;
feature = fru_data[offset++];
dep_chn_id = fru_data[offset++];
printf(" Feature: 0x%02x [%8s] - ", feature, (feature&0x1)==1?"Source":"Receiver");
printf(" Dep. CLK-ID: 0x%02x\n", dep_chn_id);
}
/* direct desc */
for(j=0; j<direct_cnt; j++){
unsigned char feature, family, accuracy;
unsigned int freq, min_freq, max_freq;
feature = fru_data[offset++];
family = fru_data[offset++];
accuracy = fru_data[offset++];
freq = (fru_data[offset+0] << 0 ) | (fru_data[offset+1] << 8 )
| (fru_data[offset+2] << 16) | (fru_data[offset+3] << 24);
offset += 4;
min_freq = (fru_data[offset+0] << 0 ) | (fru_data[offset+1] << 8 )
| (fru_data[offset+2] << 16) | (fru_data[offset+3] << 24);
offset += 4;
max_freq = (fru_data[offset+0] << 0 ) | (fru_data[offset+1] << 8 )
| (fru_data[offset+2] << 16) | (fru_data[offset+3] << 24);
offset += 4;
printf(" - Feature: 0x%02x - PLL: %x / Asym: %s\n",
feature,
(feature > 1) & 1,
(feature&1)?"Source":"Receiver");
printf(" Family: 0x%02x - AccLVL: 0x%02x\n", family, accuracy);
printf(" FRQ: %-9ld - min: %-9ld - max: %-9ld\n",
freq, min_freq, max_freq);
}
printf("\n");
}
printf("\n");
}
break;
case FRU_UTCA_FRU_INFO_TABLE:
case FRU_UTCA_CARRIER_MNG_IP:
case FRU_UTCA_CARRIER_INFO:
case FRU_UTCA_CARRIER_LOCATION:
case FRU_UTCA_SHMC_IP_LINK:
case FRU_UTCA_POWER_POLICY:
case FRU_UTCA_ACTIVATION:
case FRU_UTCA_PM_CAPABILTY:
case FRU_UTCA_FAN_GEOGRAPHY:
case FRU_UTCA_CLOCK_MAPPING:
case FRU_UTCA_MSG_BRIDGE_POLICY:
case FRU_UTCA_OEM_MODULE_DESC:
printf(" Not implemented yet. uTCA specific record found!!\n");
printf(" - Record ID: 0x%02x\n", h->record_id);
break;
default:
printf(" Unknown OEM Extension Record ID: %x\n", h->record_id);
break;
}
}
/* __ipmi_fru_print - Do actual work to print a FRU by its ID
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
__ipmi_fru_print(struct ipmi_intf * intf, uint8_t id)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
memset(&fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru.size, fru.access ? "words" : "bytes");
if (fru.size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru.size);
return -1;
}
/*
* retrieve the FRU header
*/
msg_data[0] = id;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return 1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return 1;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 1) {
lprintf(LOG_ERR, " Unknown FRU header version 0x%02x",
header.version);
return -1;
}
/* offsets need converted to bytes
* but that conversion is not done to the structure
* because we may end up with offset > 255
* which would overflow our 1-byte offset field */
lprintf(LOG_DEBUG, "fru.header.version: 0x%x",
header.version);
lprintf(LOG_DEBUG, "fru.header.offset.internal: 0x%x",
header.offset.internal * 8);
lprintf(LOG_DEBUG, "fru.header.offset.chassis: 0x%x",
header.offset.chassis * 8);
lprintf(LOG_DEBUG, "fru.header.offset.board: 0x%x",
header.offset.board * 8);
lprintf(LOG_DEBUG, "fru.header.offset.product: 0x%x",
header.offset.product * 8);
lprintf(LOG_DEBUG, "fru.header.offset.multi: 0x%x",
header.offset.multi * 8);
/*
* rather than reading the entire part
* only read the areas we'll format
*/
/* chassis area */
if ((header.offset.chassis*8) >= sizeof(struct fru_header))
fru_area_print_chassis(intf, &fru, id, header.offset.chassis*8);
/* board area */
if ((header.offset.board*8) >= sizeof(struct fru_header))
fru_area_print_board(intf, &fru, id, header.offset.board*8);
/* product area */
if ((header.offset.product*8) >= sizeof(struct fru_header))
fru_area_print_product(intf, &fru, id, header.offset.product*8);
/* multirecord area */
if( verbose==0 ) /* scipp parsing multirecord */
return 0;
if ((header.offset.multi*8) >= sizeof(struct fru_header))
fru_area_print_multirec(intf, &fru, id, header.offset.multi*8);
return 0;
}
/* ipmi_fru_print - Print a FRU from its SDR locator record
*
* @intf: ipmi interface
* @fru: SDR FRU Locator Record
*
* returns -1 on error
*/
int
ipmi_fru_print(struct ipmi_intf * intf, struct sdr_record_fru_locator * fru)
{
char desc[17];
uint8_t bridged_request = 0;
uint32_t save_addr;
uint32_t save_channel;
int rc = 0;
if (!fru)
return __ipmi_fru_print(intf, 0);
/* Logical FRU Device
* dev_type == 0x10
* modifier
* 0x00 = IPMI FRU Inventory
* 0x01 = DIMM Memory ID
* 0x02 = IPMI FRU Inventory
* 0x03 = System Processor FRU
* 0xff = unspecified
*
* EEPROM 24C01 or equivalent
* dev_type >= 0x08 && dev_type <= 0x0f
* modifier
* 0x00 = unspecified
* 0x01 = DIMM Memory ID
* 0x02 = IPMI FRU Inventory
* 0x03 = System Processor Cartridge
*/
if (fru->dev_type != 0x10 &&
(fru->dev_type_modifier != 0x02 ||
fru->dev_type < 0x08 || fru->dev_type > 0x0f))
return -1;
if (fru->dev_slave_addr == IPMI_BMC_SLAVE_ADDR &&
fru->device_id == 0)
return 0;
memset(desc, 0, sizeof(desc));
memcpy(desc, fru->id_string, fru->id_code & 0x01f);
desc[fru->id_code & 0x01f] = 0;
printf("FRU Device Description : %s (ID %d)\n", desc, fru->device_id);
switch (fru->dev_type_modifier) {
case 0x00:
case 0x02:
if (BRIDGE_TO_SENSOR(intf, fru->dev_slave_addr,
fru->channel_num)) {
bridged_request = 1;
save_addr = intf->target_addr;
intf->target_addr = fru->dev_slave_addr;
save_channel = intf->target_channel;
intf->target_channel = fru->channel_num;
}
/* print FRU */
rc = __ipmi_fru_print(intf, fru->device_id);
if (bridged_request) {
intf->target_addr = save_addr;
intf->target_channel = save_channel;
}
break;
case 0x01:
rc = ipmi_spd_print_fru(intf, fru->device_id);
break;
default:
if (verbose)
printf(" Unsupported device 0x%02x "
"type 0x%02x with modifier 0x%02x\n",
fru->device_id, fru->dev_type,
fru->dev_type_modifier);
else
printf(" Unsupported device\n");
}
printf("\n");
return rc;
}
/* ipmi_fru_print_all - Print builtin FRU + SDR FRU Locator records
*
* @intf: ipmi interface
*
* returns -1 on error
*/
static int
ipmi_fru_print_all(struct ipmi_intf * intf)
{
struct ipmi_sdr_iterator * itr;
struct sdr_get_rs * header;
struct sdr_record_fru_locator * fru;
int rc;
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct ipm_devid_rsp *devid;
struct sdr_record_mc_locator * mc;
uint32_t save_addr;
printf("FRU Device Description : Builtin FRU Device (ID 0)\n");
/* TODO: Figure out if FRU device 0 may show up in SDR records. */
/* Do a Get Device ID command to determine device support */
memset (&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_APP;
req.msg.cmd = BMC_GET_DEVICE_ID;
req.msg.data_len = 0;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_ERR, "Get Device ID command failed");
return -1;
}
if (rsp->ccode) {
lprintf(LOG_ERR, "Get Device ID command failed: %s",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
devid = (struct ipm_devid_rsp *) rsp->data;
/* Check the FRU inventory device bit to decide whether various */
/* FRU commands can be issued to FRU device #0 LUN 0 */
if (devid->adtl_device_support & 0x08) { /* FRU Inventory Device bit? */
rc = ipmi_fru_print(intf, NULL);
printf("\n");
}
itr = ipmi_sdr_start(intf, 0);
if (!itr)
return -1;
/* Walk the SDRs looking for FRU Devices and Management Controller Devices. */
/* For FRU devices, print the FRU from the SDR locator record. */
/* For MC devices, issue FRU commands to the satellite controller to print */
/* FRU data. */
while ((header = ipmi_sdr_get_next_header(intf, itr)))
{
if (header->type == SDR_RECORD_TYPE_MC_DEVICE_LOCATOR ) {
/* Check the capabilities of the Management Controller Device */
mc = (struct sdr_record_mc_locator *)
ipmi_sdr_get_record(intf, header, itr);
/* Does this MC device support FRU inventory device? */
if (mc && (mc->dev_support & 0x08) && /* FRU inventory device? */
intf->target_addr != mc->dev_slave_addr) {
/* Yes. Prepare to issue FRU commands to FRU device #0 LUN 0 */
/* using the slave address specified in the MC record. */
/* save current target address */
save_addr = intf->target_addr;
/* set new target address to satellite controller */
intf->target_addr = mc->dev_slave_addr;
printf("FRU Device Description : %-16s\n", mc->id_string);
/* print the FRU by issuing FRU commands to the satellite */
/* controller. */
rc = __ipmi_fru_print(intf, 0);
printf("\n");
/* restore previous target */
intf->target_addr = save_addr;
}
free_n(&mc);
continue;
}
if (header->type != SDR_RECORD_TYPE_FRU_DEVICE_LOCATOR)
continue;
/* Print the FRU from the SDR locator record. */
fru = (struct sdr_record_fru_locator *)
ipmi_sdr_get_record(intf, header, itr);
if (!fru || !fru->logical) {
free_n(&fru);
continue;
}
rc = ipmi_fru_print(intf, fru);
free_n(&fru);
}
ipmi_sdr_end(itr);
return rc;
}
/* ipmi_fru_read_help() - print help text for 'read'
*
* returns void
*/
void
ipmi_fru_read_help()
{
lprintf(LOG_NOTICE, "fru read <fru id> <fru file>");
lprintf(LOG_NOTICE, "Note: FRU ID and file(incl. full path) must be specified.");
lprintf(LOG_NOTICE, "Example: ipmitool fru read 0 /root/fru.bin");
} /* ipmi_fru_read_help() */
static void
ipmi_fru_read_to_bin(struct ipmi_intf * intf,
char * pFileName,
uint8_t fruId)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
uint8_t msg_data[4];
uint8_t * pFruBuf;
msg_data[0] = fruId;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
return;
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf (" Timeout accessing FRU info. (Device not present?)\n");
return;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
if (verbose) {
printf("Fru Size = %d bytes\n",fru.size);
printf("Fru Access = %xh\n", fru.access);
}
pFruBuf = malloc(fru.size);
if (pFruBuf) {
printf("Fru Size : %d bytes\n",fru.size);
read_fru_area(intf, &fru, fruId, 0, fru.size, pFruBuf);
} else {
lprintf(LOG_ERR, "Cannot allocate %d bytes\n", fru.size);
return;
}
if(pFruBuf)
{
FILE * pFile;
pFile = fopen(pFileName,"wb");
if (pFile) {
fwrite(pFruBuf, fru.size, 1, pFile);
printf("Done\n");
} else {
lprintf(LOG_ERR, "Error opening file %s\n", pFileName);
free_n(&pFruBuf);
return;
}
fclose(pFile);
}
free_n(&pFruBuf);
}
static void
ipmi_fru_write_from_bin(struct ipmi_intf * intf,
char * pFileName,
uint8_t fruId)
{
struct ipmi_rs *rsp;
struct ipmi_rq req;
struct fru_info fru;
uint8_t msg_data[4];
uint8_t *pFruBuf;
uint16_t len = 0;
FILE *pFile;
msg_data[0] = fruId;
memset(&req, 0, sizeof (req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
return;
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf(" Timeout accessing FRU info. (Device not present?)\n");
return;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
if (verbose) {
printf("Fru Size = %d bytes\n", fru.size);
printf("Fru Access = %xh\n", fru.access);
}
pFruBuf = malloc(fru.size);
if (!pFruBuf) {
lprintf(LOG_ERR, "Cannot allocate %d bytes\n", fru.size);
return;
}
pFile = fopen(pFileName, "rb");
if (pFile) {
len = fread(pFruBuf, 1, fru.size, pFile);
printf("Fru Size : %d bytes\n", fru.size);
printf("Size to Write : %d bytes\n", len);
fclose(pFile);
} else {
lprintf(LOG_ERR, "Error opening file %s\n", pFileName);
}
if (len != 0) {
write_fru_area(intf, &fru, fruId,0, 0, len, pFruBuf);
lprintf(LOG_INFO,"Done");
}
free_n(&pFruBuf);
}
/* ipmi_fru_write_help() - print help text for 'write'
*
* returns void
*/
void
ipmi_fru_write_help()
{
lprintf(LOG_NOTICE, "fru write <fru id> <fru file>");
lprintf(LOG_NOTICE, "Note: FRU ID and file(incl. full path) must be specified.");
lprintf(LOG_NOTICE, "Example: ipmitool fru write 0 /root/fru.bin");
} /* ipmi_fru_write_help() */
/* ipmi_fru_edit_help - print help text for 'fru edit' command
*
* returns void
*/
void
ipmi_fru_edit_help()
{
lprintf(LOG_NOTICE,
"fru edit <fruid> field <section> <index> <string> - edit FRU string");
lprintf(LOG_NOTICE,
"fru edit <fruid> oem iana <record> <format> <args> - limited OEM support");
} /* ipmi_fru_edit_help() */
/* ipmi_fru_edit_multirec - Query new values to replace original FRU content
*
* @intf: interface to use
* @id: FRU id to work on
*
* returns: nothing
*/
/* Work in progress, copy paste most of the stuff for other functions in this
file ... not elegant yet */
static int
ipmi_fru_edit_multirec(struct ipmi_intf * intf, uint8_t id ,
int argc, char ** argv)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
uint16_t retStatus = 0;
uint32_t offFruMultiRec;
uint32_t fruMultiRecSize = 0;
struct fru_info fruInfo;
retStatus = ipmi_fru_get_multirec_location_from_fru(intf, id, &fruInfo,
&offFruMultiRec,
&fruMultiRecSize);
if (retStatus != 0) {
return retStatus;
}
lprintf(LOG_DEBUG, "FRU Size : %lu\n", fruMultiRecSize);
lprintf(LOG_DEBUG, "Multi Rec offset: %lu\n", offFruMultiRec);
{
memset(&fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru.size, fru.access ? "words" : "bytes");
if (fru.size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru.size);
return -1;
}
}
{
uint8_t * fru_data;
uint32_t i;
uint32_t offset= offFruMultiRec;
struct fru_multirec_header * h;
uint32_t last_off, len;
uint8_t error=0;
i = last_off = offset;
memset(&fru, 0, sizeof(fru));
fru_data = malloc(fru.size + 1);
if (!fru_data) {
lprintf(LOG_ERR, " Out of memory!");
return -1;
}
memset(fru_data, 0, fru.size + 1);
do {
h = (struct fru_multirec_header *) (fru_data + i);
/* read area in (at most) FRU_MULTIREC_CHUNK_SIZE bytes at a time */
if ((last_off < (i + sizeof(*h))) || (last_off < (i + h->len)))
{
len = fru.size - last_off;
if (len > FRU_MULTIREC_CHUNK_SIZE)
len = FRU_MULTIREC_CHUNK_SIZE;
if (read_fru_area(intf, &fru, id, last_off, len, fru_data) < 0)
break;
last_off += len;
}
if( h->type == FRU_RECORD_TYPE_OEM_EXTENSION ){
struct fru_multirec_oem_header *oh=(struct fru_multirec_oem_header *)
&fru_data[i + sizeof(struct fru_multirec_header)];
uint32_t iana = oh->mfg_id[0] | oh->mfg_id[1]<<8 | oh->mfg_id[2]<<16;
uint32_t suppliedIana = 0 ;
/* Now makes sure this is really PICMG record */
/* Default to PICMG for backward compatibility */
if( argc <=2 ) {
suppliedIana = IPMI_OEM_PICMG;
} else {
if( !strncmp( argv[2] , "oem" , 3 )) {
/* Expect IANA number next */
if( argc <= 3 ) {
lprintf(LOG_ERR, "oem iana <record> <format> [<args>]");
error = 1;
} else {
if (str2uint(argv[3], &suppliedIana) == 0) {
lprintf(LOG_DEBUG,
"using iana: %d",
suppliedIana);
} else {
lprintf(LOG_ERR,
"Given IANA '%s' is invalid.",
argv[3]);
error = 1;
}
}
}
}
if( suppliedIana == iana ) {
lprintf(LOG_DEBUG, "Matching record found" );
if( iana == IPMI_OEM_PICMG ){
if( ipmi_fru_picmg_ext_edit(fru_data,
i + sizeof(struct fru_multirec_header),
h->len, h, oh )){
/* The fru changed */
write_fru_area(intf,&fru,id, i,i,
h->len+ sizeof(struct fru_multirec_header), fru_data);
}
}
else if( iana == IPMI_OEM_KONTRON ) {
if( ipmi_fru_oemkontron_edit( argc,argv,fru_data,
i + sizeof(struct fru_multirec_header),
h->len, h, oh )){
/* The fru changed */
write_fru_area(intf,&fru,id, i,i,
h->len+ sizeof(struct fru_multirec_header), fru_data);
}
}
/* FIXME: Add OEM record support here */
else{
printf(" OEM IANA (%s) Record not support in this mode\n",
val2str( iana, ipmi_oem_info));
error = 1;
}
}
}
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80) && (error != 1));
free_n(&fru_data);
}
return 0;
}
/* ipmi_fru_get_help - print help text for 'fru get'
*
* returns void
*/
void
ipmi_fru_get_help()
{
lprintf(LOG_NOTICE,
"fru get <fruid> oem iana <record> <format> <args> - limited OEM support");
} /* ipmi_fru_get_help() */
void
ipmi_fru_internaluse_help()
{
lprintf(LOG_NOTICE,
"fru internaluse <fru id> info - get internal use area size");
lprintf(LOG_NOTICE,
"fru internaluse <fru id> print - print internal use area in hex");
lprintf(LOG_NOTICE,
"fru internaluse <fru id> read <fru file> - read internal use area to file");
lprintf(LOG_NOTICE,
"fru internaluse <fru id> write <fru file> - write internal use area from file");
} /* void ipmi_fru_internaluse_help() */
/* ipmi_fru_get_multirec - Query new values to replace original FRU content
*
* @intf: interface to use
* @id: FRU id to work on
*
* returns: nothing
*/
/* Work in progress, copy paste most of the stuff for other functions in this
file ... not elegant yet */
static int
ipmi_fru_get_multirec(struct ipmi_intf * intf, uint8_t id ,
int argc, char ** argv)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
uint16_t retStatus = 0;
uint32_t offFruMultiRec;
uint32_t fruMultiRecSize = 0;
struct fru_info fruInfo;
retStatus = ipmi_fru_get_multirec_location_from_fru(intf, id, &fruInfo,
&offFruMultiRec,
&fruMultiRecSize);
if (retStatus != 0) {
return retStatus;
}
lprintf(LOG_DEBUG, "FRU Size : %lu\n", fruMultiRecSize);
lprintf(LOG_DEBUG, "Multi Rec offset: %lu\n", offFruMultiRec);
{
memset(&fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru.size, fru.access ? "words" : "bytes");
if (fru.size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru.size);
return -1;
}
}
{
uint8_t * fru_data;
uint32_t i;
uint32_t offset= offFruMultiRec;
struct fru_multirec_header * h;
uint32_t last_off, len;
uint8_t error=0;
i = last_off = offset;
fru_data = malloc(fru.size + 1);
if (!fru_data) {
lprintf(LOG_ERR, " Out of memory!");
return -1;
}
memset(fru_data, 0, fru.size + 1);
do {
h = (struct fru_multirec_header *) (fru_data + i);
/* read area in (at most) FRU_MULTIREC_CHUNK_SIZE bytes at a time */
if ((last_off < (i + sizeof(*h))) || (last_off < (i + h->len)))
{
len = fru.size - last_off;
if (len > FRU_MULTIREC_CHUNK_SIZE)
len = FRU_MULTIREC_CHUNK_SIZE;
if (read_fru_area(intf, &fru, id, last_off, len, fru_data) < 0)
break;
last_off += len;
}
if( h->type == FRU_RECORD_TYPE_OEM_EXTENSION ){
struct fru_multirec_oem_header *oh=(struct fru_multirec_oem_header *)
&fru_data[i + sizeof(struct fru_multirec_header)];
uint32_t iana = oh->mfg_id[0] | oh->mfg_id[1]<<8 | oh->mfg_id[2]<<16;
uint32_t suppliedIana = 0 ;
/* Now makes sure this is really PICMG record */
if( !strncmp( argv[2] , "oem" , 3 )) {
/* Expect IANA number next */
if( argc <= 3 ) {
lprintf(LOG_ERR, "oem iana <record> <format>");
error = 1;
} else {
if (str2uint(argv[3], &suppliedIana) == 0) {
lprintf(LOG_DEBUG,
"using iana: %d",
suppliedIana);
} else {
lprintf(LOG_ERR,
"Given IANA '%s' is invalid.",
argv[3]);
error = 1;
}
}
}
if( suppliedIana == iana ) {
lprintf(LOG_DEBUG, "Matching record found" );
if( iana == IPMI_OEM_KONTRON ) {
ipmi_fru_oemkontron_get(argc, argv, fru_data,
i + sizeof(struct fru_multirec_header),
oh);
}
/* FIXME: Add OEM record support here */
else{
printf(" OEM IANA (%s) Record not supported in this mode\n",
val2str( iana, ipmi_oem_info));
error = 1;
}
}
}
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80) && (error != 1));
free_n(&fru_data);
}
return 0;
}
#define ERR_EXIT do { rc = -1; goto exit; } while(0)
static
int
ipmi_fru_upg_ekeying(struct ipmi_intf *intf, char *pFileName, uint8_t fruId)
{
struct fru_info fruInfo = {0};
uint8_t *buf = NULL;
uint32_t offFruMultiRec = 0;
uint32_t fruMultiRecSize = 0;
uint32_t offFileMultiRec = 0;
uint32_t fileMultiRecSize = 0;
int rc = 0;
if (!pFileName) {
lprintf(LOG_ERR, "File expected, but none given.");
ERR_EXIT;
}
if (ipmi_fru_get_multirec_location_from_fru(intf, fruId, &fruInfo,
&offFruMultiRec, &fruMultiRecSize) != 0) {
lprintf(LOG_ERR, "Failed to get multirec location from FRU.");
ERR_EXIT;
}
lprintf(LOG_DEBUG, "FRU Size : %lu\n", fruMultiRecSize);
lprintf(LOG_DEBUG, "Multi Rec offset: %lu\n", offFruMultiRec);
if (ipmi_fru_get_multirec_size_from_file(pFileName, &fileMultiRecSize,
&offFileMultiRec) != 0) {
lprintf(LOG_ERR, "Failed to get multirec size from file '%s'.", pFileName);
ERR_EXIT;
}
buf = malloc(fileMultiRecSize);
if (!buf) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
ERR_EXIT;
}
if (ipmi_fru_get_multirec_from_file(pFileName, buf, fileMultiRecSize,
offFileMultiRec) != 0) {
lprintf(LOG_ERR, "Failed to get multirec from file '%s'.", pFileName);
ERR_EXIT;
}
if (ipmi_fru_get_adjust_size_from_buffer(buf, &fileMultiRecSize) != 0) {
lprintf(LOG_ERR, "Failed to adjust size from buffer.");
ERR_EXIT;
}
if (write_fru_area(intf, &fruInfo, fruId, 0, offFruMultiRec,
fileMultiRecSize, buf) != 0) {
lprintf(LOG_ERR, "Failed to write FRU area.");
ERR_EXIT;
}
lprintf(LOG_INFO, "Done upgrading Ekey.");
exit:
free_n(&buf);
return rc;
}
/* ipmi_fru_upgekey_help - print help text for 'upgEkey'
*
* returns void
*/
void
ipmi_fru_upgekey_help()
{
lprintf(LOG_NOTICE, "fru upgEkey <fru id> <fru file>");
lprintf(LOG_NOTICE, "Note: FRU ID and file(incl. full path) must be specified.");
lprintf(LOG_NOTICE, "Example: ipmitool fru upgEkey 0 /root/fru.bin");
} /* ipmi_fru_upgekey_help() */
static int
ipmi_fru_get_multirec_size_from_file(char * pFileName,
uint32_t * pSize,
uint32_t * pOffset)
{
struct fru_header header;
FILE * pFile;
uint8_t len = 0;
uint32_t end = 0;
*pSize = 0;
pFile = fopen(pFileName,"rb");
if (pFile) {
rewind(pFile);
len = fread(&header, 1, 8, pFile);
fseek(pFile, 0, SEEK_END);
end = ftell(pFile);
fclose(pFile);
}
lprintf(LOG_DEBUG, "File Size = %lu\n", end);
lprintf(LOG_DEBUG, "Len = %u\n", len);
if (len != 8) {
printf("Error with file %s in getting size\n", pFileName);
return -1;
}
if (header.version != 0x01) {
printf ("Unknown FRU header version %02x.\n", header.version);
return -1;
}
/* Retrieve length */
if (((header.offset.internal * 8) > (header.offset.internal * 8)) &&
((header.offset.internal * 8) < end))
end = (header.offset.internal * 8);
if (((header.offset.chassis * 8) > (header.offset.chassis * 8)) &&
((header.offset.chassis * 8) < end))
end = (header.offset.chassis * 8);
if (((header.offset.board * 8) > (header.offset.board * 8)) &&
((header.offset.board * 8) < end))
end = (header.offset.board * 8);
if (((header.offset.product * 8) > (header.offset.product * 8)) &&
((header.offset.product * 8) < end))
end = (header.offset.product * 8);
*pSize = end - (header.offset.multi * 8);
*pOffset = (header.offset.multi * 8);
return 0;
}
int
ipmi_fru_get_adjust_size_from_buffer(uint8_t * fru_data, uint32_t *pSize)
{
struct fru_multirec_header * head;
int status = 0;
uint8_t checksum = 0;
uint8_t counter = 0;
uint16_t count = 0;
do {
checksum = 0;
head = (struct fru_multirec_header *) (fru_data + count);
if (verbose) {
printf("Adding (");
}
for (counter = 0; counter < sizeof(struct fru_multirec_header); counter++) {
if (verbose) {
printf(" %02X", *(fru_data + count + counter));
}
checksum += *(fru_data + count + counter);
}
if (verbose) {
printf(")");
}
if (checksum != 0) {
lprintf(LOG_ERR, "Bad checksum in Multi Records");
status = -1;
if (verbose) {
printf("--> FAIL");
}
} else if (verbose) {
printf("--> OK");
}
if (verbose > 1 && checksum == 0) {
for (counter = 0; counter < head->len; counter++) {
printf(" %02X", *(fru_data + count + counter
+ sizeof(struct fru_multirec_header)));
}
}
if (verbose) {
printf("\n");
}
count += head->len + sizeof (struct fru_multirec_header);
} while ((!(head->format & 0x80)) && (status == 0));
*pSize = count;
lprintf(LOG_DEBUG, "Size of multirec: %lu\n", *pSize);
return status;
}
static int
ipmi_fru_get_multirec_from_file(char * pFileName, uint8_t * pBufArea,
uint32_t size, uint32_t offset)
{
FILE * pFile;
uint32_t len = 0;
if (!pFileName) {
lprintf(LOG_ERR, "Invalid file name given.");
return -1;
}
errno = 0;
pFile = fopen(pFileName, "rb");
if (!pFile) {
lprintf(LOG_ERR, "Error opening file '%s': %i -> %s.", pFileName, errno,
strerror(errno));
return -1;
}
errno = 0;
if (fseek(pFile, offset, SEEK_SET) != 0) {
lprintf(LOG_ERR, "Failed to seek in file '%s': %i -> %s.", pFileName, errno,
strerror(errno));
fclose(pFile);
return -1;
}
len = fread(pBufArea, size, 1, pFile);
fclose(pFile);
if (len != 1) {
lprintf(LOG_ERR, "Error in file '%s'.", pFileName);
return -1;
}
return 0;
}
static int
ipmi_fru_get_multirec_location_from_fru(struct ipmi_intf * intf,
uint8_t fruId,
struct fru_info *pFruInfo,
uint32_t * pRetLocation,
uint32_t * pRetSize)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[4];
uint32_t end;
struct fru_header header;
*pRetLocation = 0;
msg_data[0] = fruId;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
if (verbose > 1)
printf("no response\n");
return -1;
}
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf (" Timeout accessing FRU info. (Device not present?)\n");
else
printf (" CCODE = 0x%02x\n", rsp->ccode);
return -1;
}
pFruInfo->size = (rsp->data[1] << 8) | rsp->data[0];
pFruInfo->access = rsp->data[2] & 0x1;
if (verbose > 1)
printf("pFruInfo->size = %d bytes (accessed by %s)\n",
pFruInfo->size, pFruInfo->access ? "words" : "bytes");
if (!pFruInfo->size)
return -1;
msg_data[0] = fruId;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
return -1;
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf (" Timeout while reading FRU data. (Device not present?)\n");
return -1;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 0x01) {
printf (" Unknown FRU header version %02x.\n", header.version);
return -1;
}
end = pFruInfo->size;
/* Retrieve length */
if (((header.offset.internal * 8) > (header.offset.internal * 8)) &&
((header.offset.internal * 8) < end))
end = (header.offset.internal * 8);
if (((header.offset.chassis * 8) > (header.offset.chassis * 8)) &&
((header.offset.chassis * 8) < end))
end = (header.offset.chassis * 8);
if (((header.offset.board * 8) > (header.offset.board * 8)) &&
((header.offset.board * 8) < end))
end = (header.offset.board * 8);
if (((header.offset.product * 8) > (header.offset.product * 8)) &&
((header.offset.product * 8) < end))
end = (header.offset.product * 8);
*pRetSize = end;
*pRetLocation = 8 * header.offset.multi;
return 0;
}
/* ipmi_fru_get_internal_use_offset - Retrieve internal use offset
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_get_internal_use_info( struct ipmi_intf * intf,
uint8_t id,
struct fru_info * fru,
uint16_t * size,
uint16_t * offset)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_header header;
uint8_t msg_data[4];
// Init output value
* offset = 0;
* size = 0;
memset(fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
fru->size = (rsp->data[1] << 8) | rsp->data[0];
fru->access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru->size, fru->access ? "words" : "bytes");
if (fru->size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru->size);
return -1;
}
/*
* retrieve the FRU header
*/
msg_data[0] = id;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return 1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return 1;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 1) {
lprintf(LOG_ERR, " Unknown FRU header version 0x%02x",
header.version);
return -1;
}
lprintf(LOG_DEBUG, "fru.header.version: 0x%x",
header.version);
lprintf(LOG_DEBUG, "fru.header.offset.internal: 0x%x",
header.offset.internal * 8);
lprintf(LOG_DEBUG, "fru.header.offset.chassis: 0x%x",
header.offset.chassis * 8);
lprintf(LOG_DEBUG, "fru.header.offset.board: 0x%x",
header.offset.board * 8);
lprintf(LOG_DEBUG, "fru.header.offset.product: 0x%x",
header.offset.product * 8);
lprintf(LOG_DEBUG, "fru.header.offset.multi: 0x%x",
header.offset.multi * 8);
if((header.offset.internal*8) == 0)
{
* size = 0;
* offset = 0;
}
else
{
(* offset) = (header.offset.internal*8);
if(header.offset.chassis != 0)
{
(* size) = ((header.offset.chassis*8)-(* offset));
}
else if(header.offset.board != 0)
{
(* size) = ((header.offset.board*8)-(* offset));
}
else if(header.offset.product != 0)
{
(* size) = ((header.offset.product*8)-(* offset));
}
else if(header.offset.multi != 0)
{
(* size) = ((header.offset.multi*8)-(* offset));
}
else
{
(* size) = (fru->size - (* offset));
}
}
return 0;
}
/* ipmi_fru_info_internal_use - print internal use info
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_info_internal_use(struct ipmi_intf * intf, uint8_t id)
{
struct fru_info fru;
uint16_t size;
uint16_t offset;
int rc = 0;
rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset);
if(rc == 0)
{
lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset);
printf( "Internal Use Area Size : %i\n", size);
}
else
{
lprintf(LOG_ERR, "Cannot access internal use area");
return -1;
}
return 0;
}
/* ipmi_fru_help - print help text for FRU subcommand
*
* returns void
*/
void
ipmi_fru_help()
{
lprintf(LOG_NOTICE,
"FRU Commands: print read write upgEkey edit internaluse get");
} /* ipmi_fru_help() */
/* ipmi_fru_read_internal_use - print internal use are in hex or file
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_read_internal_use(struct ipmi_intf * intf, uint8_t id, char * pFileName)
{
struct fru_info fru;
uint16_t size;
uint16_t offset;
int rc = 0;
rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset);
if(rc == 0)
{
uint8_t * frubuf;
lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset);
printf( "Internal Use Area Size : %i\n", size);
frubuf = malloc( size );
if(frubuf)
{
rc = read_fru_area_section(intf, &fru, id, offset, size, frubuf);
if(rc == 0)
{
if(!pFileName)
{
uint16_t counter;
for(counter = 0; counter < size; counter ++)
{
if((counter % 16) == 0)
printf("\n%02i- ", (counter / 16));
printf("%02X ", frubuf[counter]);
}
}
else
{
FILE * pFile;
pFile = fopen(pFileName,"wb");
if (pFile)
{
fwrite(frubuf, size, 1, pFile);
printf("Done\n");
}
else
{
lprintf(LOG_ERR, "Error opening file %s\n", pFileName);
free_n(&frubuf);
return -1;
}
fclose(pFile);
}
}
printf("\n");
free_n(&frubuf);
}
}
else
{
lprintf(LOG_ERR, "Cannot access internal use area");
}
return 0;
}
/* ipmi_fru_write_internal_use - print internal use are in hex or file
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_write_internal_use(struct ipmi_intf * intf, uint8_t id, char * pFileName)
{
struct fru_info fru;
uint16_t size;
uint16_t offset;
int rc = 0;
rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset);
if(rc == 0)
{
uint8_t * frubuf;
FILE * fp;
uint32_t fileLength = 0;
lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset);
printf( "Internal Use Area Size : %i\n", size);
fp = fopen(pFileName, "r");
if(fp)
{
/* Retrieve file length, check if it's fits the Eeprom Size */
fseek(fp, 0 ,SEEK_END);
fileLength = ftell(fp);
lprintf(LOG_ERR, "File Size: %i", fileLength);
lprintf(LOG_ERR, "Area Size: %i", size);
if(fileLength != size)
{
lprintf(LOG_ERR, "File size does not fit Eeprom Size");
fclose(fp);
fp = NULL;
}
else
{
fseek(fp, 0 ,SEEK_SET);
}
}
if(fp)
{
frubuf = malloc( size );
if(frubuf)
{
uint16_t fru_read_size;
fru_read_size = fread(frubuf, 1, size, fp);
if(fru_read_size == size)
{
rc = write_fru_area(intf, &fru, id, 0, offset, size, frubuf);
if(rc == 0)
{
lprintf(LOG_INFO, "Done\n");
}
}
else
{
lprintf(LOG_ERR, "Unable to read file: %i\n", fru_read_size);
}
free_n(&frubuf);
}
fclose(fp);
fp = NULL;
}
}
else
{
lprintf(LOG_ERR, "Cannot access internal use area");
}
return 0;
}
int
ipmi_fru_main(struct ipmi_intf * intf, int argc, char ** argv)
{
int rc = 0;
uint8_t fru_id = 0;
if (argc < 1) {
rc = ipmi_fru_print_all(intf);
}
else if (strncmp(argv[0], "help", 4) == 0) {
ipmi_fru_help();
return 0;
}
else if (strncmp(argv[0], "print", 5) == 0 ||
strncmp(argv[0], "list", 4) == 0) {
if (argc > 1) {
if (strcmp(argv[1], "help") == 0) {
lprintf(LOG_NOTICE, "fru print [fru id] - print information about FRU(s)");
return 0;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
rc = __ipmi_fru_print(intf, fru_id);
} else {
rc = ipmi_fru_print_all(intf);
}
}
else if (!strncmp(argv[0], "read", 5)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_read_help();
return 0;
} else if (argc < 3) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_read_help();
return -1;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[2]) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
printf("FRU File : %s\n", argv[2]);
}
/* TODO - rc is missing */
ipmi_fru_read_to_bin(intf, argv[2], fru_id);
}
else if (!strncmp(argv[0], "write", 5)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_write_help();
return 0;
} else if (argc < 3) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_write_help();
return -1;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[2]) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
printf("FRU File : %s\n", argv[2]);
}
/* TODO - rc is missing */
ipmi_fru_write_from_bin(intf, argv[2], fru_id);
}
else if (!strncmp(argv[0], "upgEkey", 7)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_upgekey_help();
return 0;
} else if (argc < 3) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_upgekey_help();
return -1;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[2]) != 0)
return -1;
rc = ipmi_fru_upg_ekeying(intf, argv[2], fru_id);
}
else if (!strncmp(argv[0], "internaluse", 11)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_internaluse_help();
return 0;
}
if ( (argc >= 3) && (!strncmp(argv[2], "info", 4)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
rc = ipmi_fru_info_internal_use(intf, fru_id);
}
else if ( (argc >= 3) && (!strncmp(argv[2], "print", 5)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
rc = ipmi_fru_read_internal_use(intf, fru_id, NULL);
}
else if ( (argc >= 4) && (!strncmp(argv[2], "read", 4)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[3]) != 0)
return -1;
lprintf(LOG_DEBUG, "FRU ID : %d", fru_id);
lprintf(LOG_DEBUG, "FRU File : %s", argv[3]);
rc = ipmi_fru_read_internal_use(intf, fru_id, argv[3]);
}
else if ( (argc >= 4) && (!strncmp(argv[2], "write", 5)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[3]) != 0)
return -1;
lprintf(LOG_DEBUG, "FRU ID : %d", fru_id);
lprintf(LOG_DEBUG, "FRU File : %s", argv[3]);
rc = ipmi_fru_write_internal_use(intf, fru_id, argv[3]);
} else {
lprintf(LOG_ERR,
"Either unknown command or not enough parameters given.");
ipmi_fru_internaluse_help();
return -1;
}
}
else if (!strncmp(argv[0], "edit", 4)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_edit_help();
return 0;
} else if (argc < 2) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_edit_help();
return -1;
}
if (argc >= 2) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
}
} else {
printf("Using default FRU ID: %d\n", fru_id);
}
if (argc >= 3) {
if (!strncmp(argv[2], "field", 5)) {
if (argc != 6) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_edit_help();
return -1;
}
rc = ipmi_fru_set_field_string(intf, fru_id, *argv[3], *argv[4],
(char *) argv[5]);
} else if (!strncmp(argv[2], "oem", 3)) {
rc = ipmi_fru_edit_multirec(intf, fru_id, argc, argv);
} else {
lprintf(LOG_ERR, "Invalid command: %s", argv[2]);
ipmi_fru_edit_help();
return -1;
}
} else {
rc = ipmi_fru_edit_multirec(intf, fru_id, argc, argv);
}
}
else if (!strncmp(argv[0], "get", 4)) {
if (argc > 1 && (strncmp(argv[1], "help", 4) == 0)) {
ipmi_fru_get_help();
return 0;
} else if (argc < 2) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_get_help();
return -1;
}
if (argc >= 2) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
}
} else {
printf("Using default FRU ID: %d\n", fru_id);
}
if (argc >= 3) {
if (!strncmp(argv[2], "oem", 3)) {
rc = ipmi_fru_get_multirec(intf, fru_id, argc, argv);
} else {
lprintf(LOG_ERR, "Invalid command: %s", argv[2]);
ipmi_fru_get_help();
return -1;
}
} else {
rc = ipmi_fru_get_multirec(intf, fru_id, argc, argv);
}
}
else {
lprintf(LOG_ERR, "Invalid FRU command: %s", argv[0]);
ipmi_fru_help();
return -1;
}
return rc;
}
/* ipmi_fru_set_field_string - Set a field string to a new value, Need to be the same size. If
* size if not equal, the function ipmi_fru_set_field_string_rebuild
* will be called.
*
* @intf: ipmi interface
* @id: fru id
* @f_type: Type of the Field : c=Chassis b=Board p=Product
* @f_index: findex of the field, zero indexed.
* @f_string: NULL terminated string
*
* returns -1 on error
* returns 1 if successful
*/
static int
ipmi_fru_set_field_string(struct ipmi_intf * intf, uint8_t fruId, uint8_t
f_type, uint8_t f_index, char *f_string)
{
struct ipmi_rs *rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
uint8_t checksum;
int i = 0;
int rc = 1;
uint8_t *fru_data = NULL;
uint8_t *fru_area = NULL;
uint32_t fru_field_offset, fru_field_offset_tmp;
uint32_t fru_section_len, header_offset;
memset(msg_data, 0, 4);
msg_data[0] = fruId;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
rc = -1;
goto ipmi_fru_set_field_string_out;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
if (fru.size < 1) {
printf(" Invalid FRU size %d", fru.size);
rc = -1;
goto ipmi_fru_set_field_string_out;
}
/*
* retrieve the FRU header
*/
msg_data[0] = fruId;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
{
printf(" Device not present (No Response)\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if (rsp->ccode)
{
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 1) {
printf(" Unknown FRU header version 0x%02x",
header.version);
rc = -1;
goto ipmi_fru_set_field_string_out;
}
fru_data = malloc( fru.size );
if (!fru_data) {
printf("Out of memory!\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
/* Setup offset from the field type */
/* Chassis type field */
if (f_type == 'c' ) {
header_offset = (header.offset.chassis * 8);
read_fru_area(intf ,&fru, fruId, header_offset , 3 , fru_data);
fru_field_offset = 3;
fru_section_len = *(fru_data + 1) * 8;
}
/* Board type field */
else if (f_type == 'b' ) {
header_offset = (header.offset.board * 8);
read_fru_area(intf ,&fru, fruId, header_offset , 3 , fru_data);
fru_field_offset = 6;
fru_section_len = *(fru_data + 1) * 8;
}
/* Product type field */
else if (f_type == 'p' ) {
header_offset = (header.offset.product * 8);
read_fru_area(intf ,&fru, fruId, header_offset , 3 , fru_data);
fru_field_offset = 3;
fru_section_len = *(fru_data + 1) * 8;
}
else
{
printf("Wrong field type.");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
memset(fru_data, 0, fru.size);
if( read_fru_area(intf ,&fru, fruId, header_offset ,
fru_section_len , fru_data) < 0 )
{
rc = -1;
goto ipmi_fru_set_field_string_out;
}
/* Convert index from character to decimal */
f_index= f_index - 0x30;
/*Seek to field index */
for (i=0; i <= f_index; i++) {
fru_field_offset_tmp = fru_field_offset;
if (fru_area) {
free_n(&fru_area);
}
fru_area = (uint8_t *) get_fru_area_str(fru_data, &fru_field_offset);
}
if (!FRU_FIELD_VALID(fru_area)) {
printf("Field not found !\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if ( strlen((const char *)fru_area) == strlen((const char *)f_string) )
{
printf("Updating Field '%s' with '%s' ...\n", fru_area, f_string );
memcpy(fru_data + fru_field_offset_tmp + 1,
f_string, strlen(f_string));
checksum = 0;
/* Calculate Header Checksum */
for (i = 0; i < fru_section_len - 1; i++)
{
checksum += fru_data[i];
}
checksum = (~checksum) + 1;
fru_data[fru_section_len - 1] = checksum;
/* Write the updated section to the FRU data; source offset => 0 */
if( write_fru_area(intf, &fru, fruId, 0,
header_offset, fru_section_len, fru_data) < 0 )
{
printf("Write to FRU data failed.\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
}
else {
printf("String size are not equal, resizing fru to fit new string\n");
if(
ipmi_fru_set_field_string_rebuild(intf,fruId,fru,header,f_type,f_index,f_string)
)
{
rc = -1;
goto ipmi_fru_set_field_string_out;
}
}
ipmi_fru_set_field_string_out:
free_n(&fru_data);
free_n(&fru_area);
return rc;
}
/*
This function can update a string within of the following section when the size is not equal:
Chassis
Product
Board
*/
/* ipmi_fru_set_field_string_rebuild - Set a field string to a new value, When size are not
* the same size.
*
* This function can update a string within of the following section when the size is not equal:
*
* - Chassis
* - Product
* - Board
*
* @intf: ipmi interface
* @fruId: fru id
* @fru: info about fru
* @header: contain the header of the FRU
* @f_type: Type of the Field : c=Chassis b=Board p=Product
* @f_index: findex of the field, zero indexed.
* @f_string: NULL terminated string
*
* returns -1 on error
* returns 1 if successful
*/
#define DBG_RESIZE_FRU
static int
ipmi_fru_set_field_string_rebuild(struct ipmi_intf * intf, uint8_t fruId,
struct fru_info fru, struct fru_header header,
uint8_t f_type, uint8_t f_index, char *f_string)
{
int i = 0;
uint8_t *fru_data_old = NULL;
uint8_t *fru_data_new = NULL;
uint8_t *fru_area = NULL;
uint32_t fru_field_offset, fru_field_offset_tmp;
uint32_t fru_section_len, header_offset;
uint32_t chassis_offset, board_offset, product_offset;
uint32_t chassis_len, board_len, product_len, product_len_new;
int num_byte_change = 0, padding_len = 0;
uint32_t counter;
unsigned char cksum;
int rc = 1;
fru_data_old = calloc( fru.size, sizeof(uint8_t) );
fru_data_new = malloc( fru.size );
if (!fru_data_old || !fru_data_new) {
printf("Out of memory!\n");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
/*************************
1) Read ALL FRU */
printf("Read All FRU area\n");
printf("Fru Size : %u bytes\n", fru.size);
/* Read current fru data */
read_fru_area(intf ,&fru, fruId, 0, fru.size , fru_data_old);
#ifdef DBG_RESIZE_FRU
printf("Copy to new FRU\n");
#endif
/*************************
2) Copy all FRU to new FRU */
memcpy(fru_data_new, fru_data_old, fru.size);
/* Build location of all modifiable components */
chassis_offset = (header.offset.chassis * 8);
board_offset = (header.offset.board * 8);
product_offset = (header.offset.product * 8);
/* Retrieve length of all modifiable components */
chassis_len = *(fru_data_old + chassis_offset + 1) * 8;
board_len = *(fru_data_old + board_offset + 1) * 8;
product_len = *(fru_data_old + product_offset + 1) * 8;
product_len_new = product_len;
/* Chassis type field */
if (f_type == 'c' )
{
header_offset = chassis_offset;
fru_field_offset = chassis_offset + 3;
fru_section_len = chassis_len;
}
/* Board type field */
else if (f_type == 'b' )
{
header_offset = board_offset;
fru_field_offset = board_offset + 6;
fru_section_len = board_len;
}
/* Product type field */
else if (f_type == 'p' )
{
header_offset = product_offset;
fru_field_offset = product_offset + 3;
fru_section_len = product_len;
}
else
{
printf("Wrong field type.");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
/*************************
3) Seek to field index */
for (i = 0;i <= f_index; i++) {
fru_field_offset_tmp = fru_field_offset;
free_n(&fru_area);
fru_area = (uint8_t *) get_fru_area_str(fru_data_old, &fru_field_offset);
}
if (!FRU_FIELD_VALID(fru_area)) {
printf("Field not found (1)!\n");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
#ifdef DBG_RESIZE_FRU
printf("Section Length: %u\n", fru_section_len);
#endif
/*************************
4) Check number of padding bytes and bytes changed */
for(counter = 2; counter < fru_section_len; counter ++)
{
if(*(fru_data_old + (header_offset + fru_section_len - counter)) == 0)
padding_len ++;
else
break;
}
num_byte_change = strlen(f_string) - strlen(fru_area);
#ifdef DBG_RESIZE_FRU
printf("Padding Length: %u\n", padding_len);
printf("NumByte Change: %i\n", num_byte_change);
printf("Start SecChnge: %x\n", *(fru_data_old + fru_field_offset_tmp));
printf("End SecChnge : %x\n", *(fru_data_old + fru_field_offset_tmp + strlen(f_string) + 1));
printf("Start Section : %x\n", *(fru_data_old + header_offset));
printf("End Sec wo Pad: %x\n", *(fru_data_old + header_offset + fru_section_len - 2 - padding_len));
printf("End Section : %x\n", *(fru_data_old + header_offset + fru_section_len - 1));
#endif
/* Calculate New Padding Length */
padding_len -= num_byte_change;
#ifdef DBG_RESIZE_FRU
printf("New Padding Length: %i\n", padding_len);
#endif
/*************************
5) Check if section must be resize. This occur when padding length is not between 0 and 7 */
if( (padding_len < 0) || (padding_len >= 8))
{
uint32_t remaining_offset = ((header.offset.product * 8) + product_len);
int change_size_by_8;
if(padding_len >= 8)
{
/* Section must be set smaller */
change_size_by_8 = ((padding_len) / 8) * (-1);
}
else
{
/* Section must be set bigger */
change_size_by_8 = 1 + (((padding_len+1) / 8) * (-1));
}
/* Recalculate padding and section length base on the section changes */
fru_section_len += (change_size_by_8 * 8);
padding_len += (change_size_by_8 * 8);
#ifdef DBG_RESIZE_FRU
printf("change_size_by_8: %i\n", change_size_by_8);
printf("New Padding Length: %i\n", padding_len);
printf("change_size_by_8: %i\n", change_size_by_8);
printf("header.offset.board: %i\n", header.offset.board);
#endif
/* Must move sections */
/* Section that can be modified are as follow
Chassis
Board
product */
/* Chassis type field */
if (f_type == 'c' )
{
printf("Moving Section Chassis, from %i to %i\n",
((header.offset.board) * 8),
((header.offset.board + change_size_by_8) * 8)
);
memcpy(
(fru_data_new + ((header.offset.board + change_size_by_8) * 8)),
(fru_data_old + (header.offset.board) * 8),
board_len
);
header.offset.board += change_size_by_8;
}
/* Board type field */
if ((f_type == 'c' ) || (f_type == 'b' ))
{
printf("Moving Section Product, from %i to %i\n",
((header.offset.product) * 8),
((header.offset.product + change_size_by_8) * 8)
);
memcpy(
(fru_data_new + ((header.offset.product + change_size_by_8) * 8)),
(fru_data_old + (header.offset.product) * 8),
product_len
);
header.offset.product += change_size_by_8;
}
if ((f_type == 'c' ) || (f_type == 'b' ) || (f_type == 'p' )) {
printf("Change multi offset from %d to %d\n", header.offset.multi, header.offset.multi + change_size_by_8);
header.offset.multi += change_size_by_8;
}
/* Adjust length of the section */
if (f_type == 'c')
{
*(fru_data_new + chassis_offset + 1) += change_size_by_8;
}
else if( f_type == 'b')
{
*(fru_data_new + board_offset + 1) += change_size_by_8;
}
else if( f_type == 'p')
{
*(fru_data_new + product_offset + 1) += change_size_by_8;
product_len_new = *(fru_data_new + product_offset + 1) * 8;
}
/* Rebuild Header checksum */
{
unsigned char * pfru_header = (unsigned char *) &header;
header.checksum = 0;
for(counter = 0; counter < (sizeof(struct fru_header) -1); counter ++)
{
header.checksum += pfru_header[counter];
}
header.checksum = (0 - header.checksum);
memcpy(fru_data_new, pfru_header, sizeof(struct fru_header));
}
/* Move remaining sections in 1 copy */
printf("Moving Remaining Bytes (Multi-Rec , etc..), from %i to %i\n",
remaining_offset,
((header.offset.product) * 8) + product_len_new
);
if(((header.offset.product * 8) + product_len_new - remaining_offset) < 0)
{
memcpy(
fru_data_new + (header.offset.product * 8) + product_len_new,
fru_data_old + remaining_offset,
fru.size - remaining_offset
);
}
else
{
memcpy(
fru_data_new + (header.offset.product * 8) + product_len_new,
fru_data_old + remaining_offset,
fru.size - ((header.offset.product * 8) + product_len_new)
);
}
}
/* Update only if it's fits padding length as defined in the spec, otherwise, it's an internal
error */
/*************************
6) Update Field and sections */
if( (padding_len >=0) && (padding_len < 8))
{
/* Do not requires any change in other section */
/* Change field length */
printf(
"Updating Field : '%s' with '%s' ... (Length from '%d' to '%d')\n",
fru_area, f_string,
(int)*(fru_data_old + fru_field_offset_tmp),
(int)(0xc0 + strlen(f_string)));
*(fru_data_new + fru_field_offset_tmp) = (0xc0 + strlen(f_string));
memcpy(fru_data_new + fru_field_offset_tmp + 1, f_string, strlen(f_string));
/* Copy remaining bytes in section */
#ifdef DBG_RESIZE_FRU
printf("Copying remaining of sections: %d \n",
(int)((fru_data_old + header_offset + fru_section_len - 1) -
(fru_data_old + fru_field_offset_tmp + strlen(f_string) + 1)));
#endif
memcpy((fru_data_new + fru_field_offset_tmp + 1 +
strlen(f_string)),
(fru_data_old + fru_field_offset_tmp + 1 +
strlen(fru_area)),
((fru_data_old + header_offset + fru_section_len - 1) -
(fru_data_old + fru_field_offset_tmp + strlen(f_string) + 1)));
/* Add Padding if required */
for(counter = 0; counter < padding_len; counter ++)
{
*(fru_data_new + header_offset + fru_section_len - 1 -
padding_len + counter) = 0;
}
/* Calculate New Checksum */
cksum = 0;
for( counter = 0; counter <fru_section_len-1; counter ++ )
{
cksum += *(fru_data_new + header_offset + counter);
}
*(fru_data_new + header_offset + fru_section_len - 1) = (0 - cksum);
#ifdef DBG_RESIZE_FRU
printf("Calculate New Checksum: %x\n", (0 - cksum));
#endif
}
else
{
printf( "Internal error, padding length %i (must be from 0 to 7) ", padding_len );
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
/*************************
7) Finally, write new FRU */
printf("Writing new FRU.\n");
if( write_fru_area( intf, &fru, fruId, 0, 0, fru.size, fru_data_new ) < 0 )
{
printf("Write to FRU data failed.\n");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
printf("Done.\n");
ipmi_fru_set_field_string_rebuild_out:
free_n(&fru_area);
free_n(&fru_data_new);
free_n(&fru_data_old);
return rc;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4522_0 |
crossvul-cpp_data_good_4012_3 | /* regcomp.c
*/
/*
* 'A fair jaw-cracker dwarf-language must be.' --Samwise Gamgee
*
* [p.285 of _The Lord of the Rings_, II/iii: "The Ring Goes South"]
*/
/* This file contains functions for compiling a regular expression. See
* also regexec.c which funnily enough, contains functions for executing
* a regular expression.
*
* This file is also copied at build time to ext/re/re_comp.c, where
* it's built with -DPERL_EXT_RE_BUILD -DPERL_EXT_RE_DEBUG -DPERL_EXT.
* This causes the main functions to be compiled under new names and with
* debugging support added, which makes "use re 'debug'" work.
*/
/* NOTE: this is derived from Henry Spencer's regexp code, and should not
* confused with the original package (see point 3 below). Thanks, Henry!
*/
/* Additional note: this code is very heavily munged from Henry's version
* in places. In some spots I've traded clarity for efficiency, so don't
* blame Henry for some of the lack of readability.
*/
/* The names of the functions have been changed from regcomp and
* regexec to pregcomp and pregexec in order to avoid conflicts
* with the POSIX routines of the same names.
*/
#ifdef PERL_EXT_RE_BUILD
#include "re_top.h"
#endif
/*
* pregcomp and pregexec -- regsub and regerror are not used in perl
*
* Copyright (c) 1986 by University of Toronto.
* Written by Henry Spencer. Not derived from licensed software.
*
* Permission is granted to anyone to use this software for any
* purpose on any computer system, and to redistribute it freely,
* subject to the following restrictions:
*
* 1. The author is not responsible for the consequences of use of
* this software, no matter how awful, even if they arise
* from defects in it.
*
* 2. The origin of this software must not be misrepresented, either
* by explicit claim or by omission.
*
* 3. Altered versions must be plainly marked as such, and must not
* be misrepresented as being the original software.
*
*
**** Alterations to Henry's code are...
****
**** Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
**** 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
**** by Larry Wall and others
****
**** You may distribute under the terms of either the GNU General Public
**** License or the Artistic License, as specified in the README file.
*
* Beware that some of this code is subtly aware of the way operator
* precedence is structured in regular expressions. Serious changes in
* regular-expression syntax might require a total rethink.
*/
#include "EXTERN.h"
#define PERL_IN_REGCOMP_C
#include "perl.h"
#define REG_COMP_C
#ifdef PERL_IN_XSUB_RE
# include "re_comp.h"
EXTERN_C const struct regexp_engine my_reg_engine;
#else
# include "regcomp.h"
#endif
#include "dquote_inline.h"
#include "invlist_inline.h"
#include "unicode_constants.h"
#define HAS_NONLATIN1_FOLD_CLOSURE(i) \
_HAS_NONLATIN1_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i)
#define HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(i) \
_HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i)
#define IS_NON_FINAL_FOLD(c) _IS_NON_FINAL_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
#define IS_IN_SOME_FOLD_L1(c) _IS_IN_SOME_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
#ifndef STATIC
#define STATIC static
#endif
/* this is a chain of data about sub patterns we are processing that
need to be handled separately/specially in study_chunk. Its so
we can simulate recursion without losing state. */
struct scan_frame;
typedef struct scan_frame {
regnode *last_regnode; /* last node to process in this frame */
regnode *next_regnode; /* next node to process when last is reached */
U32 prev_recursed_depth;
I32 stopparen; /* what stopparen do we use */
bool in_gosub; /* this or an outer frame is for GOSUB */
struct scan_frame *this_prev_frame; /* this previous frame */
struct scan_frame *prev_frame; /* previous frame */
struct scan_frame *next_frame; /* next frame */
} scan_frame;
/* Certain characters are output as a sequence with the first being a
* backslash. */
#define isBACKSLASHED_PUNCT(c) strchr("-[]\\^", c)
struct RExC_state_t {
U32 flags; /* RXf_* are we folding, multilining? */
U32 pm_flags; /* PMf_* stuff from the calling PMOP */
char *precomp; /* uncompiled string. */
char *precomp_end; /* pointer to end of uncompiled string. */
REGEXP *rx_sv; /* The SV that is the regexp. */
regexp *rx; /* perl core regexp structure */
regexp_internal *rxi; /* internal data for regexp object
pprivate field */
char *start; /* Start of input for compile */
char *end; /* End of input for compile */
char *parse; /* Input-scan pointer. */
char *copy_start; /* start of copy of input within
constructed parse string */
char *save_copy_start; /* Provides one level of saving
and restoring 'copy_start' */
char *copy_start_in_input; /* Position in input string
corresponding to copy_start */
SSize_t whilem_seen; /* number of WHILEM in this expr */
regnode *emit_start; /* Start of emitted-code area */
regnode_offset emit; /* Code-emit pointer */
I32 naughty; /* How bad is this pattern? */
I32 sawback; /* Did we see \1, ...? */
U32 seen;
SSize_t size; /* Number of regnode equivalents in
pattern */
/* position beyond 'precomp' of the warning message furthest away from
* 'precomp'. During the parse, no warnings are raised for any problems
* earlier in the parse than this position. This works if warnings are
* raised the first time a given spot is parsed, and if only one
* independent warning is raised for any given spot */
Size_t latest_warn_offset;
I32 npar; /* Capture buffer count so far in the
parse, (OPEN) plus one. ("par" 0 is
the whole pattern)*/
I32 total_par; /* During initial parse, is either 0,
or -1; the latter indicating a
reparse is needed. After that pass,
it is what 'npar' became after the
pass. Hence, it being > 0 indicates
we are in a reparse situation */
I32 nestroot; /* root parens we are in - used by
accept */
I32 seen_zerolen;
regnode_offset *open_parens; /* offsets to open parens */
regnode_offset *close_parens; /* offsets to close parens */
I32 parens_buf_size; /* #slots malloced open/close_parens */
regnode *end_op; /* END node in program */
I32 utf8; /* whether the pattern is utf8 or not */
I32 orig_utf8; /* whether the pattern was originally in utf8 */
/* XXX use this for future optimisation of case
* where pattern must be upgraded to utf8. */
I32 uni_semantics; /* If a d charset modifier should use unicode
rules, even if the pattern is not in
utf8 */
HV *paren_names; /* Paren names */
regnode **recurse; /* Recurse regops */
I32 recurse_count; /* Number of recurse regops we have generated */
U8 *study_chunk_recursed; /* bitmap of which subs we have moved
through */
U32 study_chunk_recursed_bytes; /* bytes in bitmap */
I32 in_lookbehind;
I32 contains_locale;
I32 override_recoding;
#ifdef EBCDIC
I32 recode_x_to_native;
#endif
I32 in_multi_char_class;
struct reg_code_blocks *code_blocks;/* positions of literal (?{})
within pattern */
int code_index; /* next code_blocks[] slot */
SSize_t maxlen; /* mininum possible number of chars in string to match */
scan_frame *frame_head;
scan_frame *frame_last;
U32 frame_count;
AV *warn_text;
HV *unlexed_names;
#ifdef ADD_TO_REGEXEC
char *starttry; /* -Dr: where regtry was called. */
#define RExC_starttry (pRExC_state->starttry)
#endif
SV *runtime_code_qr; /* qr with the runtime code blocks */
#ifdef DEBUGGING
const char *lastparse;
I32 lastnum;
AV *paren_name_list; /* idx -> name */
U32 study_chunk_recursed_count;
SV *mysv1;
SV *mysv2;
#define RExC_lastparse (pRExC_state->lastparse)
#define RExC_lastnum (pRExC_state->lastnum)
#define RExC_paren_name_list (pRExC_state->paren_name_list)
#define RExC_study_chunk_recursed_count (pRExC_state->study_chunk_recursed_count)
#define RExC_mysv (pRExC_state->mysv1)
#define RExC_mysv1 (pRExC_state->mysv1)
#define RExC_mysv2 (pRExC_state->mysv2)
#endif
bool seen_d_op;
bool strict;
bool study_started;
bool in_script_run;
bool use_BRANCHJ;
};
#define RExC_flags (pRExC_state->flags)
#define RExC_pm_flags (pRExC_state->pm_flags)
#define RExC_precomp (pRExC_state->precomp)
#define RExC_copy_start_in_input (pRExC_state->copy_start_in_input)
#define RExC_copy_start_in_constructed (pRExC_state->copy_start)
#define RExC_save_copy_start_in_constructed (pRExC_state->save_copy_start)
#define RExC_precomp_end (pRExC_state->precomp_end)
#define RExC_rx_sv (pRExC_state->rx_sv)
#define RExC_rx (pRExC_state->rx)
#define RExC_rxi (pRExC_state->rxi)
#define RExC_start (pRExC_state->start)
#define RExC_end (pRExC_state->end)
#define RExC_parse (pRExC_state->parse)
#define RExC_latest_warn_offset (pRExC_state->latest_warn_offset )
#define RExC_whilem_seen (pRExC_state->whilem_seen)
#define RExC_seen_d_op (pRExC_state->seen_d_op) /* Seen something that differs
under /d from /u ? */
#ifdef RE_TRACK_PATTERN_OFFSETS
# define RExC_offsets (RExC_rxi->u.offsets) /* I am not like the
others */
#endif
#define RExC_emit (pRExC_state->emit)
#define RExC_emit_start (pRExC_state->emit_start)
#define RExC_sawback (pRExC_state->sawback)
#define RExC_seen (pRExC_state->seen)
#define RExC_size (pRExC_state->size)
#define RExC_maxlen (pRExC_state->maxlen)
#define RExC_npar (pRExC_state->npar)
#define RExC_total_parens (pRExC_state->total_par)
#define RExC_parens_buf_size (pRExC_state->parens_buf_size)
#define RExC_nestroot (pRExC_state->nestroot)
#define RExC_seen_zerolen (pRExC_state->seen_zerolen)
#define RExC_utf8 (pRExC_state->utf8)
#define RExC_uni_semantics (pRExC_state->uni_semantics)
#define RExC_orig_utf8 (pRExC_state->orig_utf8)
#define RExC_open_parens (pRExC_state->open_parens)
#define RExC_close_parens (pRExC_state->close_parens)
#define RExC_end_op (pRExC_state->end_op)
#define RExC_paren_names (pRExC_state->paren_names)
#define RExC_recurse (pRExC_state->recurse)
#define RExC_recurse_count (pRExC_state->recurse_count)
#define RExC_study_chunk_recursed (pRExC_state->study_chunk_recursed)
#define RExC_study_chunk_recursed_bytes \
(pRExC_state->study_chunk_recursed_bytes)
#define RExC_in_lookbehind (pRExC_state->in_lookbehind)
#define RExC_contains_locale (pRExC_state->contains_locale)
#ifdef EBCDIC
# define RExC_recode_x_to_native (pRExC_state->recode_x_to_native)
#endif
#define RExC_in_multi_char_class (pRExC_state->in_multi_char_class)
#define RExC_frame_head (pRExC_state->frame_head)
#define RExC_frame_last (pRExC_state->frame_last)
#define RExC_frame_count (pRExC_state->frame_count)
#define RExC_strict (pRExC_state->strict)
#define RExC_study_started (pRExC_state->study_started)
#define RExC_warn_text (pRExC_state->warn_text)
#define RExC_in_script_run (pRExC_state->in_script_run)
#define RExC_use_BRANCHJ (pRExC_state->use_BRANCHJ)
#define RExC_unlexed_names (pRExC_state->unlexed_names)
/* Heuristic check on the complexity of the pattern: if TOO_NAUGHTY, we set
* a flag to disable back-off on the fixed/floating substrings - if it's
* a high complexity pattern we assume the benefit of avoiding a full match
* is worth the cost of checking for the substrings even if they rarely help.
*/
#define RExC_naughty (pRExC_state->naughty)
#define TOO_NAUGHTY (10)
#define MARK_NAUGHTY(add) \
if (RExC_naughty < TOO_NAUGHTY) \
RExC_naughty += (add)
#define MARK_NAUGHTY_EXP(exp, add) \
if (RExC_naughty < TOO_NAUGHTY) \
RExC_naughty += RExC_naughty / (exp) + (add)
#define ISMULT1(c) ((c) == '*' || (c) == '+' || (c) == '?')
#define ISMULT2(s) ((*s) == '*' || (*s) == '+' || (*s) == '?' || \
((*s) == '{' && regcurly(s)))
/*
* Flags to be passed up and down.
*/
#define WORST 0 /* Worst case. */
#define HASWIDTH 0x01 /* Known to not match null strings, could match
non-null ones. */
/* Simple enough to be STAR/PLUS operand; in an EXACTish node must be a single
* character. (There needs to be a case: in the switch statement in regexec.c
* for any node marked SIMPLE.) Note that this is not the same thing as
* REGNODE_SIMPLE */
#define SIMPLE 0x02
#define SPSTART 0x04 /* Starts with * or + */
#define POSTPONED 0x08 /* (?1),(?&name), (??{...}) or similar */
#define TRYAGAIN 0x10 /* Weeded out a declaration. */
#define RESTART_PARSE 0x20 /* Need to redo the parse */
#define NEED_UTF8 0x40 /* In conjunction with RESTART_PARSE, need to
calcuate sizes as UTF-8 */
#define REG_NODE_NUM(x) ((x) ? (int)((x)-RExC_emit_start) : -1)
/* whether trie related optimizations are enabled */
#if PERL_ENABLE_EXTENDED_TRIE_OPTIMISATION
#define TRIE_STUDY_OPT
#define FULL_TRIE_STUDY
#define TRIE_STCLASS
#endif
#define PBYTE(u8str,paren) ((U8*)(u8str))[(paren) >> 3]
#define PBITVAL(paren) (1 << ((paren) & 7))
#define PAREN_TEST(u8str,paren) ( PBYTE(u8str,paren) & PBITVAL(paren))
#define PAREN_SET(u8str,paren) PBYTE(u8str,paren) |= PBITVAL(paren)
#define PAREN_UNSET(u8str,paren) PBYTE(u8str,paren) &= (~PBITVAL(paren))
#define REQUIRE_UTF8(flagp) STMT_START { \
if (!UTF) { \
*flagp = RESTART_PARSE|NEED_UTF8; \
return 0; \
} \
} STMT_END
/* Change from /d into /u rules, and restart the parse. RExC_uni_semantics is
* a flag that indicates we need to override /d with /u as a result of
* something in the pattern. It should only be used in regards to calling
* set_regex_charset() or get_regex_charse() */
#define REQUIRE_UNI_RULES(flagp, restart_retval) \
STMT_START { \
if (DEPENDS_SEMANTICS) { \
set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); \
RExC_uni_semantics = 1; \
if (RExC_seen_d_op && LIKELY(! IN_PARENS_PASS)) { \
/* No need to restart the parse if we haven't seen \
* anything that differs between /u and /d, and no need \
* to restart immediately if we're going to reparse \
* anyway to count parens */ \
*flagp |= RESTART_PARSE; \
return restart_retval; \
} \
} \
} STMT_END
#define REQUIRE_BRANCHJ(flagp, restart_retval) \
STMT_START { \
RExC_use_BRANCHJ = 1; \
*flagp |= RESTART_PARSE; \
return restart_retval; \
} STMT_END
/* Until we have completed the parse, we leave RExC_total_parens at 0 or
* less. After that, it must always be positive, because the whole re is
* considered to be surrounded by virtual parens. Setting it to negative
* indicates there is some construct that needs to know the actual number of
* parens to be properly handled. And that means an extra pass will be
* required after we've counted them all */
#define ALL_PARENS_COUNTED (RExC_total_parens > 0)
#define REQUIRE_PARENS_PASS \
STMT_START { /* No-op if have completed a pass */ \
if (! ALL_PARENS_COUNTED) RExC_total_parens = -1; \
} STMT_END
#define IN_PARENS_PASS (RExC_total_parens < 0)
/* This is used to return failure (zero) early from the calling function if
* various flags in 'flags' are set. Two flags always cause a return:
* 'RESTART_PARSE' and 'NEED_UTF8'. 'extra' can be used to specify any
* additional flags that should cause a return; 0 if none. If the return will
* be done, '*flagp' is first set to be all of the flags that caused the
* return. */
#define RETURN_FAIL_ON_RESTART_OR_FLAGS(flags,flagp,extra) \
STMT_START { \
if ((flags) & (RESTART_PARSE|NEED_UTF8|(extra))) { \
*(flagp) = (flags) & (RESTART_PARSE|NEED_UTF8|(extra)); \
return 0; \
} \
} STMT_END
#define MUST_RESTART(flags) ((flags) & (RESTART_PARSE))
#define RETURN_FAIL_ON_RESTART(flags,flagp) \
RETURN_FAIL_ON_RESTART_OR_FLAGS( flags, flagp, 0)
#define RETURN_FAIL_ON_RESTART_FLAGP(flagp) \
if (MUST_RESTART(*(flagp))) return 0
/* This converts the named class defined in regcomp.h to its equivalent class
* number defined in handy.h. */
#define namedclass_to_classnum(class) ((int) ((class) / 2))
#define classnum_to_namedclass(classnum) ((classnum) * 2)
#define _invlist_union_complement_2nd(a, b, output) \
_invlist_union_maybe_complement_2nd(a, b, TRUE, output)
#define _invlist_intersection_complement_2nd(a, b, output) \
_invlist_intersection_maybe_complement_2nd(a, b, TRUE, output)
/* About scan_data_t.
During optimisation we recurse through the regexp program performing
various inplace (keyhole style) optimisations. In addition study_chunk
and scan_commit populate this data structure with information about
what strings MUST appear in the pattern. We look for the longest
string that must appear at a fixed location, and we look for the
longest string that may appear at a floating location. So for instance
in the pattern:
/FOO[xX]A.*B[xX]BAR/
Both 'FOO' and 'A' are fixed strings. Both 'B' and 'BAR' are floating
strings (because they follow a .* construct). study_chunk will identify
both FOO and BAR as being the longest fixed and floating strings respectively.
The strings can be composites, for instance
/(f)(o)(o)/
will result in a composite fixed substring 'foo'.
For each string some basic information is maintained:
- min_offset
This is the position the string must appear at, or not before.
It also implicitly (when combined with minlenp) tells us how many
characters must match before the string we are searching for.
Likewise when combined with minlenp and the length of the string it
tells us how many characters must appear after the string we have
found.
- max_offset
Only used for floating strings. This is the rightmost point that
the string can appear at. If set to SSize_t_MAX it indicates that the
string can occur infinitely far to the right.
For fixed strings, it is equal to min_offset.
- minlenp
A pointer to the minimum number of characters of the pattern that the
string was found inside. This is important as in the case of positive
lookahead or positive lookbehind we can have multiple patterns
involved. Consider
/(?=FOO).*F/
The minimum length of the pattern overall is 3, the minimum length
of the lookahead part is 3, but the minimum length of the part that
will actually match is 1. So 'FOO's minimum length is 3, but the
minimum length for the F is 1. This is important as the minimum length
is used to determine offsets in front of and behind the string being
looked for. Since strings can be composites this is the length of the
pattern at the time it was committed with a scan_commit. Note that
the length is calculated by study_chunk, so that the minimum lengths
are not known until the full pattern has been compiled, thus the
pointer to the value.
- lookbehind
In the case of lookbehind the string being searched for can be
offset past the start point of the final matching string.
If this value was just blithely removed from the min_offset it would
invalidate some of the calculations for how many chars must match
before or after (as they are derived from min_offset and minlen and
the length of the string being searched for).
When the final pattern is compiled and the data is moved from the
scan_data_t structure into the regexp structure the information
about lookbehind is factored in, with the information that would
have been lost precalculated in the end_shift field for the
associated string.
The fields pos_min and pos_delta are used to store the minimum offset
and the delta to the maximum offset at the current point in the pattern.
*/
struct scan_data_substrs {
SV *str; /* longest substring found in pattern */
SSize_t min_offset; /* earliest point in string it can appear */
SSize_t max_offset; /* latest point in string it can appear */
SSize_t *minlenp; /* pointer to the minlen relevant to the string */
SSize_t lookbehind; /* is the pos of the string modified by LB */
I32 flags; /* per substring SF_* and SCF_* flags */
};
typedef struct scan_data_t {
/*I32 len_min; unused */
/*I32 len_delta; unused */
SSize_t pos_min;
SSize_t pos_delta;
SV *last_found;
SSize_t last_end; /* min value, <0 unless valid. */
SSize_t last_start_min;
SSize_t last_start_max;
U8 cur_is_floating; /* whether the last_* values should be set as
* the next fixed (0) or floating (1)
* substring */
/* [0] is longest fixed substring so far, [1] is longest float so far */
struct scan_data_substrs substrs[2];
I32 flags; /* common SF_* and SCF_* flags */
I32 whilem_c;
SSize_t *last_closep;
regnode_ssc *start_class;
} scan_data_t;
/*
* Forward declarations for pregcomp()'s friends.
*/
static const scan_data_t zero_scan_data = {
0, 0, NULL, 0, 0, 0, 0,
{
{ NULL, 0, 0, 0, 0, 0 },
{ NULL, 0, 0, 0, 0, 0 },
},
0, 0, NULL, NULL
};
/* study flags */
#define SF_BEFORE_SEOL 0x0001
#define SF_BEFORE_MEOL 0x0002
#define SF_BEFORE_EOL (SF_BEFORE_SEOL|SF_BEFORE_MEOL)
#define SF_IS_INF 0x0040
#define SF_HAS_PAR 0x0080
#define SF_IN_PAR 0x0100
#define SF_HAS_EVAL 0x0200
/* SCF_DO_SUBSTR is the flag that tells the regexp analyzer to track the
* longest substring in the pattern. When it is not set the optimiser keeps
* track of position, but does not keep track of the actual strings seen,
*
* So for instance /foo/ will be parsed with SCF_DO_SUBSTR being true, but
* /foo/i will not.
*
* Similarly, /foo.*(blah|erm|huh).*fnorble/ will have "foo" and "fnorble"
* parsed with SCF_DO_SUBSTR on, but while processing the (...) it will be
* turned off because of the alternation (BRANCH). */
#define SCF_DO_SUBSTR 0x0400
#define SCF_DO_STCLASS_AND 0x0800
#define SCF_DO_STCLASS_OR 0x1000
#define SCF_DO_STCLASS (SCF_DO_STCLASS_AND|SCF_DO_STCLASS_OR)
#define SCF_WHILEM_VISITED_POS 0x2000
#define SCF_TRIE_RESTUDY 0x4000 /* Do restudy? */
#define SCF_SEEN_ACCEPT 0x8000
#define SCF_TRIE_DOING_RESTUDY 0x10000
#define SCF_IN_DEFINE 0x20000
#define UTF cBOOL(RExC_utf8)
/* The enums for all these are ordered so things work out correctly */
#define LOC (get_regex_charset(RExC_flags) == REGEX_LOCALE_CHARSET)
#define DEPENDS_SEMANTICS (get_regex_charset(RExC_flags) \
== REGEX_DEPENDS_CHARSET)
#define UNI_SEMANTICS (get_regex_charset(RExC_flags) == REGEX_UNICODE_CHARSET)
#define AT_LEAST_UNI_SEMANTICS (get_regex_charset(RExC_flags) \
>= REGEX_UNICODE_CHARSET)
#define ASCII_RESTRICTED (get_regex_charset(RExC_flags) \
== REGEX_ASCII_RESTRICTED_CHARSET)
#define AT_LEAST_ASCII_RESTRICTED (get_regex_charset(RExC_flags) \
>= REGEX_ASCII_RESTRICTED_CHARSET)
#define ASCII_FOLD_RESTRICTED (get_regex_charset(RExC_flags) \
== REGEX_ASCII_MORE_RESTRICTED_CHARSET)
#define FOLD cBOOL(RExC_flags & RXf_PMf_FOLD)
/* For programs that want to be strictly Unicode compatible by dying if any
* attempt is made to match a non-Unicode code point against a Unicode
* property. */
#define ALWAYS_WARN_SUPER ckDEAD(packWARN(WARN_NON_UNICODE))
#define OOB_NAMEDCLASS -1
/* There is no code point that is out-of-bounds, so this is problematic. But
* its only current use is to initialize a variable that is always set before
* looked at. */
#define OOB_UNICODE 0xDEADBEEF
#define CHR_SVLEN(sv) (UTF ? sv_len_utf8(sv) : SvCUR(sv))
/* length of regex to show in messages that don't mark a position within */
#define RegexLengthToShowInErrorMessages 127
/*
* If MARKER[12] are adjusted, be sure to adjust the constants at the top
* of t/op/regmesg.t, the tests in t/op/re_tests, and those in
* op/pragma/warn/regcomp.
*/
#define MARKER1 "<-- HERE" /* marker as it appears in the description */
#define MARKER2 " <-- HERE " /* marker as it appears within the regex */
#define REPORT_LOCATION " in regex; marked by " MARKER1 \
" in m/%" UTF8f MARKER2 "%" UTF8f "/"
/* The code in this file in places uses one level of recursion with parsing
* rebased to an alternate string constructed by us in memory. This can take
* the form of something that is completely different from the input, or
* something that uses the input as part of the alternate. In the first case,
* there should be no possibility of an error, as we are in complete control of
* the alternate string. But in the second case we don't completely control
* the input portion, so there may be errors in that. Here's an example:
* /[abc\x{DF}def]/ui
* is handled specially because \x{df} folds to a sequence of more than one
* character: 'ss'. What is done is to create and parse an alternate string,
* which looks like this:
* /(?:\x{DF}|[abc\x{DF}def])/ui
* where it uses the input unchanged in the middle of something it constructs,
* which is a branch for the DF outside the character class, and clustering
* parens around the whole thing. (It knows enough to skip the DF inside the
* class while in this substitute parse.) 'abc' and 'def' may have errors that
* need to be reported. The general situation looks like this:
*
* |<------- identical ------>|
* sI tI xI eI
* Input: ---------------------------------------------------------------
* Constructed: ---------------------------------------------------
* sC tC xC eC EC
* |<------- identical ------>|
*
* sI..eI is the portion of the input pattern we are concerned with here.
* sC..EC is the constructed substitute parse string.
* sC..tC is constructed by us
* tC..eC is an exact duplicate of the portion of the input pattern tI..eI.
* In the diagram, these are vertically aligned.
* eC..EC is also constructed by us.
* xC is the position in the substitute parse string where we found a
* problem.
* xI is the position in the original pattern corresponding to xC.
*
* We want to display a message showing the real input string. Thus we need to
* translate from xC to xI. We know that xC >= tC, since the portion of the
* string sC..tC has been constructed by us, and so shouldn't have errors. We
* get:
* xI = tI + (xC - tC)
*
* When the substitute parse is constructed, the code needs to set:
* RExC_start (sC)
* RExC_end (eC)
* RExC_copy_start_in_input (tI)
* RExC_copy_start_in_constructed (tC)
* and restore them when done.
*
* During normal processing of the input pattern, both
* 'RExC_copy_start_in_input' and 'RExC_copy_start_in_constructed' are set to
* sI, so that xC equals xI.
*/
#define sI RExC_precomp
#define eI RExC_precomp_end
#define sC RExC_start
#define eC RExC_end
#define tI RExC_copy_start_in_input
#define tC RExC_copy_start_in_constructed
#define xI(xC) (tI + (xC - tC))
#define xI_offset(xC) (xI(xC) - sI)
#define REPORT_LOCATION_ARGS(xC) \
UTF8fARG(UTF, \
(xI(xC) > eI) /* Don't run off end */ \
? eI - sI /* Length before the <--HERE */ \
: ((xI_offset(xC) >= 0) \
? xI_offset(xC) \
: (Perl_croak(aTHX_ "panic: %s: %d: negative offset: %" \
IVdf " trying to output message for " \
" pattern %.*s", \
__FILE__, __LINE__, (IV) xI_offset(xC), \
((int) (eC - sC)), sC), 0)), \
sI), /* The input pattern printed up to the <--HERE */ \
UTF8fARG(UTF, \
(xI(xC) > eI) ? 0 : eI - xI(xC), /* Length after <--HERE */ \
(xI(xC) > eI) ? eI : xI(xC)) /* pattern after <--HERE */
/* Used to point after bad bytes for an error message, but avoid skipping
* past a nul byte. */
#define SKIP_IF_CHAR(s, e) (!*(s) ? 0 : UTF ? UTF8_SAFE_SKIP(s, e) : 1)
/* Set up to clean up after our imminent demise */
#define PREPARE_TO_DIE \
STMT_START { \
if (RExC_rx_sv) \
SAVEFREESV(RExC_rx_sv); \
if (RExC_open_parens) \
SAVEFREEPV(RExC_open_parens); \
if (RExC_close_parens) \
SAVEFREEPV(RExC_close_parens); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then calls Perl_croak with the given
* arg. Show regex, up to a maximum length. If it's too long, chop and add
* "...".
*/
#define _FAIL(code) STMT_START { \
const char *ellipses = ""; \
IV len = RExC_precomp_end - RExC_precomp; \
\
PREPARE_TO_DIE; \
if (len > RegexLengthToShowInErrorMessages) { \
/* chop 10 shorter than the max, to ensure meaning of "..." */ \
len = RegexLengthToShowInErrorMessages - 10; \
ellipses = "..."; \
} \
code; \
} STMT_END
#define FAIL(msg) _FAIL( \
Perl_croak(aTHX_ "%s in regex m/%" UTF8f "%s/", \
msg, UTF8fARG(UTF, len, RExC_precomp), ellipses))
#define FAIL2(msg,arg) _FAIL( \
Perl_croak(aTHX_ msg " in regex m/%" UTF8f "%s/", \
arg, UTF8fARG(UTF, len, RExC_precomp), ellipses))
/*
* Simple_vFAIL -- like FAIL, but marks the current location in the scan
*/
#define Simple_vFAIL(m) STMT_START { \
Perl_croak(aTHX_ "%s" REPORT_LOCATION, \
m, REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL()
*/
#define vFAIL(m) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL(m); \
} STMT_END
/*
* Like Simple_vFAIL(), but accepts two arguments.
*/
#define Simple_vFAIL2(m,a1) STMT_START { \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL2().
*/
#define vFAIL2(m,a1) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL2(m, a1); \
} STMT_END
/*
* Like Simple_vFAIL(), but accepts three arguments.
*/
#define Simple_vFAIL3(m, a1, a2) STMT_START { \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL3().
*/
#define vFAIL3(m,a1,a2) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL3(m, a1, a2); \
} STMT_END
/*
* Like Simple_vFAIL(), but accepts four arguments.
*/
#define Simple_vFAIL4(m, a1, a2, a3) STMT_START { \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, a3, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
#define vFAIL4(m,a1,a2,a3) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL4(m, a1, a2, a3); \
} STMT_END
/* A specialized version of vFAIL2 that works with UTF8f */
#define vFAIL2utf8f(m, a1) STMT_START { \
PREPARE_TO_DIE; \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
#define vFAIL3utf8f(m, a1, a2) STMT_START { \
PREPARE_TO_DIE; \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/* Setting this to NULL is a signal to not output warnings */
#define TURN_OFF_WARNINGS_IN_SUBSTITUTE_PARSE \
STMT_START { \
RExC_save_copy_start_in_constructed = RExC_copy_start_in_constructed;\
RExC_copy_start_in_constructed = NULL; \
} STMT_END
#define RESTORE_WARNINGS \
RExC_copy_start_in_constructed = RExC_save_copy_start_in_constructed
/* Since a warning can be generated multiple times as the input is reparsed, we
* output it the first time we come to that point in the parse, but suppress it
* otherwise. 'RExC_copy_start_in_constructed' being NULL is a flag to not
* generate any warnings */
#define TO_OUTPUT_WARNINGS(loc) \
( RExC_copy_start_in_constructed \
&& ((xI(loc)) - RExC_precomp) > (Ptrdiff_t) RExC_latest_warn_offset)
/* After we've emitted a warning, we save the position in the input so we don't
* output it again */
#define UPDATE_WARNINGS_LOC(loc) \
STMT_START { \
if (TO_OUTPUT_WARNINGS(loc)) { \
RExC_latest_warn_offset = MAX(sI, MIN(eI, xI(loc))) \
- RExC_precomp; \
} \
} STMT_END
/* 'warns' is the output of the packWARNx macro used in 'code' */
#define _WARN_HELPER(loc, warns, code) \
STMT_START { \
if (! RExC_copy_start_in_constructed) { \
Perl_croak( aTHX_ "panic! %s: %d: Tried to warn when none" \
" expected at '%s'", \
__FILE__, __LINE__, loc); \
} \
if (TO_OUTPUT_WARNINGS(loc)) { \
if (ckDEAD(warns)) \
PREPARE_TO_DIE; \
code; \
UPDATE_WARNINGS_LOC(loc); \
} \
} STMT_END
/* m is not necessarily a "literal string", in this macro */
#define reg_warn_non_literal_string(loc, m) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
"%s" REPORT_LOCATION, \
m, REPORT_LOCATION_ARGS(loc)))
#define ckWARNreg(loc,m) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN(loc, m) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc))) \
#define vWARN_dep(loc, m) \
_WARN_HELPER(loc, packWARN(WARN_DEPRECATED), \
Perl_warner(aTHX_ packWARN(WARN_DEPRECATED), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARNdep(loc,m) \
_WARN_HELPER(loc, packWARN(WARN_DEPRECATED), \
Perl_ck_warner_d(aTHX_ packWARN(WARN_DEPRECATED), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARNregdep(loc,m) \
_WARN_HELPER(loc, packWARN2(WARN_DEPRECATED, WARN_REGEXP), \
Perl_ck_warner_d(aTHX_ packWARN2(WARN_DEPRECATED, \
WARN_REGEXP), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARN2reg_d(loc,m, a1) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner_d(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, REPORT_LOCATION_ARGS(loc)))
#define ckWARN2reg(loc, m, a1) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, REPORT_LOCATION_ARGS(loc)))
#define vWARN3(loc, m, a1, a2) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, REPORT_LOCATION_ARGS(loc)))
#define ckWARN3reg(loc, m, a1, a2) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN4(loc, m, a1, a2, a3) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, a3, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARN4reg(loc, m, a1, a2, a3) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, a3, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN5(loc, m, a1, a2, a3, a4) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, a3, a4, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARNexperimental(loc, class, m) \
_WARN_HELPER(loc, packWARN(class), \
Perl_ck_warner_d(aTHX_ packWARN(class), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
/* Convert between a pointer to a node and its offset from the beginning of the
* program */
#define REGNODE_p(offset) (RExC_emit_start + (offset))
#define REGNODE_OFFSET(node) ((node) - RExC_emit_start)
/* Macros for recording node offsets. 20001227 mjd@plover.com
* Nodes are numbered 1, 2, 3, 4. Node #n's position is recorded in
* element 2*n-1 of the array. Element #2n holds the byte length node #n.
* Element 0 holds the number n.
* Position is 1 indexed.
*/
#ifndef RE_TRACK_PATTERN_OFFSETS
#define Set_Node_Offset_To_R(offset,byte)
#define Set_Node_Offset(node,byte)
#define Set_Cur_Node_Offset
#define Set_Node_Length_To_R(node,len)
#define Set_Node_Length(node,len)
#define Set_Node_Cur_Length(node,start)
#define Node_Offset(n)
#define Node_Length(n)
#define Set_Node_Offset_Length(node,offset,len)
#define ProgLen(ri) ri->u.proglen
#define SetProgLen(ri,x) ri->u.proglen = x
#define Track_Code(code)
#else
#define ProgLen(ri) ri->u.offsets[0]
#define SetProgLen(ri,x) ri->u.offsets[0] = x
#define Set_Node_Offset_To_R(offset,byte) STMT_START { \
MJD_OFFSET_DEBUG(("** (%d) offset of node %d is %d.\n", \
__LINE__, (int)(offset), (int)(byte))); \
if((offset) < 0) { \
Perl_croak(aTHX_ "value of node is %d in Offset macro", \
(int)(offset)); \
} else { \
RExC_offsets[2*(offset)-1] = (byte); \
} \
} STMT_END
#define Set_Node_Offset(node,byte) \
Set_Node_Offset_To_R(REGNODE_OFFSET(node), (byte)-RExC_start)
#define Set_Cur_Node_Offset Set_Node_Offset(RExC_emit, RExC_parse)
#define Set_Node_Length_To_R(node,len) STMT_START { \
MJD_OFFSET_DEBUG(("** (%d) size of node %d is %d.\n", \
__LINE__, (int)(node), (int)(len))); \
if((node) < 0) { \
Perl_croak(aTHX_ "value of node is %d in Length macro", \
(int)(node)); \
} else { \
RExC_offsets[2*(node)] = (len); \
} \
} STMT_END
#define Set_Node_Length(node,len) \
Set_Node_Length_To_R(REGNODE_OFFSET(node), len)
#define Set_Node_Cur_Length(node, start) \
Set_Node_Length(node, RExC_parse - start)
/* Get offsets and lengths */
#define Node_Offset(n) (RExC_offsets[2*(REGNODE_OFFSET(n))-1])
#define Node_Length(n) (RExC_offsets[2*(REGNODE_OFFSET(n))])
#define Set_Node_Offset_Length(node,offset,len) STMT_START { \
Set_Node_Offset_To_R(REGNODE_OFFSET(node), (offset)); \
Set_Node_Length_To_R(REGNODE_OFFSET(node), (len)); \
} STMT_END
#define Track_Code(code) STMT_START { code } STMT_END
#endif
#if PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS
#define EXPERIMENTAL_INPLACESCAN
#endif /*PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS*/
#ifdef DEBUGGING
int
Perl_re_printf(pTHX_ const char *fmt, ...)
{
va_list ap;
int result;
PerlIO *f= Perl_debug_log;
PERL_ARGS_ASSERT_RE_PRINTF;
va_start(ap, fmt);
result = PerlIO_vprintf(f, fmt, ap);
va_end(ap);
return result;
}
int
Perl_re_indentf(pTHX_ const char *fmt, U32 depth, ...)
{
va_list ap;
int result;
PerlIO *f= Perl_debug_log;
PERL_ARGS_ASSERT_RE_INDENTF;
va_start(ap, depth);
PerlIO_printf(f, "%*s", ( (int)depth % 20 ) * 2, "");
result = PerlIO_vprintf(f, fmt, ap);
va_end(ap);
return result;
}
#endif /* DEBUGGING */
#define DEBUG_RExC_seen() \
DEBUG_OPTIMISE_MORE_r({ \
Perl_re_printf( aTHX_ "RExC_seen: "); \
\
if (RExC_seen & REG_ZERO_LEN_SEEN) \
Perl_re_printf( aTHX_ "REG_ZERO_LEN_SEEN "); \
\
if (RExC_seen & REG_LOOKBEHIND_SEEN) \
Perl_re_printf( aTHX_ "REG_LOOKBEHIND_SEEN "); \
\
if (RExC_seen & REG_GPOS_SEEN) \
Perl_re_printf( aTHX_ "REG_GPOS_SEEN "); \
\
if (RExC_seen & REG_RECURSE_SEEN) \
Perl_re_printf( aTHX_ "REG_RECURSE_SEEN "); \
\
if (RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN) \
Perl_re_printf( aTHX_ "REG_TOP_LEVEL_BRANCHES_SEEN "); \
\
if (RExC_seen & REG_VERBARG_SEEN) \
Perl_re_printf( aTHX_ "REG_VERBARG_SEEN "); \
\
if (RExC_seen & REG_CUTGROUP_SEEN) \
Perl_re_printf( aTHX_ "REG_CUTGROUP_SEEN "); \
\
if (RExC_seen & REG_RUN_ON_COMMENT_SEEN) \
Perl_re_printf( aTHX_ "REG_RUN_ON_COMMENT_SEEN "); \
\
if (RExC_seen & REG_UNFOLDED_MULTI_SEEN) \
Perl_re_printf( aTHX_ "REG_UNFOLDED_MULTI_SEEN "); \
\
if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) \
Perl_re_printf( aTHX_ "REG_UNBOUNDED_QUANTIFIER_SEEN "); \
\
Perl_re_printf( aTHX_ "\n"); \
});
#define DEBUG_SHOW_STUDY_FLAG(flags,flag) \
if ((flags) & flag) Perl_re_printf( aTHX_ "%s ", #flag)
#ifdef DEBUGGING
static void
S_debug_show_study_flags(pTHX_ U32 flags, const char *open_str,
const char *close_str)
{
if (!flags)
return;
Perl_re_printf( aTHX_ "%s", open_str);
DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_SEOL);
DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_MEOL);
DEBUG_SHOW_STUDY_FLAG(flags, SF_IS_INF);
DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_PAR);
DEBUG_SHOW_STUDY_FLAG(flags, SF_IN_PAR);
DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_EVAL);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_SUBSTR);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_AND);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_OR);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_WHILEM_VISITED_POS);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_RESTUDY);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_SEEN_ACCEPT);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_DOING_RESTUDY);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_IN_DEFINE);
Perl_re_printf( aTHX_ "%s", close_str);
}
static void
S_debug_studydata(pTHX_ const char *where, scan_data_t *data,
U32 depth, int is_inf)
{
GET_RE_DEBUG_FLAGS_DECL;
DEBUG_OPTIMISE_MORE_r({
if (!data)
return;
Perl_re_indentf(aTHX_ "%s: Pos:%" IVdf "/%" IVdf " Flags: 0x%" UVXf,
depth,
where,
(IV)data->pos_min,
(IV)data->pos_delta,
(UV)data->flags
);
S_debug_show_study_flags(aTHX_ data->flags," [","]");
Perl_re_printf( aTHX_
" Whilem_c: %" IVdf " Lcp: %" IVdf " %s",
(IV)data->whilem_c,
(IV)(data->last_closep ? *((data)->last_closep) : -1),
is_inf ? "INF " : ""
);
if (data->last_found) {
int i;
Perl_re_printf(aTHX_
"Last:'%s' %" IVdf ":%" IVdf "/%" IVdf,
SvPVX_const(data->last_found),
(IV)data->last_end,
(IV)data->last_start_min,
(IV)data->last_start_max
);
for (i = 0; i < 2; i++) {
Perl_re_printf(aTHX_
" %s%s: '%s' @ %" IVdf "/%" IVdf,
data->cur_is_floating == i ? "*" : "",
i ? "Float" : "Fixed",
SvPVX_const(data->substrs[i].str),
(IV)data->substrs[i].min_offset,
(IV)data->substrs[i].max_offset
);
S_debug_show_study_flags(aTHX_ data->substrs[i].flags," [","]");
}
}
Perl_re_printf( aTHX_ "\n");
});
}
static void
S_debug_peep(pTHX_ const char *str, const RExC_state_t *pRExC_state,
regnode *scan, U32 depth, U32 flags)
{
GET_RE_DEBUG_FLAGS_DECL;
DEBUG_OPTIMISE_r({
regnode *Next;
if (!scan)
return;
Next = regnext(scan);
regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "%s>%3d: %s (%d)",
depth,
str,
REG_NODE_NUM(scan), SvPV_nolen_const(RExC_mysv),
Next ? (REG_NODE_NUM(Next)) : 0 );
S_debug_show_study_flags(aTHX_ flags," [ ","]");
Perl_re_printf( aTHX_ "\n");
});
}
# define DEBUG_STUDYDATA(where, data, depth, is_inf) \
S_debug_studydata(aTHX_ where, data, depth, is_inf)
# define DEBUG_PEEP(str, scan, depth, flags) \
S_debug_peep(aTHX_ str, pRExC_state, scan, depth, flags)
#else
# define DEBUG_STUDYDATA(where, data, depth, is_inf) NOOP
# define DEBUG_PEEP(str, scan, depth, flags) NOOP
#endif
/* =========================================================
* BEGIN edit_distance stuff.
*
* This calculates how many single character changes of any type are needed to
* transform a string into another one. It is taken from version 3.1 of
*
* https://metacpan.org/pod/Text::Levenshtein::Damerau::XS
*/
/* Our unsorted dictionary linked list. */
/* Note we use UVs, not chars. */
struct dictionary{
UV key;
UV value;
struct dictionary* next;
};
typedef struct dictionary item;
PERL_STATIC_INLINE item*
push(UV key, item* curr)
{
item* head;
Newx(head, 1, item);
head->key = key;
head->value = 0;
head->next = curr;
return head;
}
PERL_STATIC_INLINE item*
find(item* head, UV key)
{
item* iterator = head;
while (iterator){
if (iterator->key == key){
return iterator;
}
iterator = iterator->next;
}
return NULL;
}
PERL_STATIC_INLINE item*
uniquePush(item* head, UV key)
{
item* iterator = head;
while (iterator){
if (iterator->key == key) {
return head;
}
iterator = iterator->next;
}
return push(key, head);
}
PERL_STATIC_INLINE void
dict_free(item* head)
{
item* iterator = head;
while (iterator) {
item* temp = iterator;
iterator = iterator->next;
Safefree(temp);
}
head = NULL;
}
/* End of Dictionary Stuff */
/* All calculations/work are done here */
STATIC int
S_edit_distance(const UV* src,
const UV* tgt,
const STRLEN x, /* length of src[] */
const STRLEN y, /* length of tgt[] */
const SSize_t maxDistance
)
{
item *head = NULL;
UV swapCount, swapScore, targetCharCount, i, j;
UV *scores;
UV score_ceil = x + y;
PERL_ARGS_ASSERT_EDIT_DISTANCE;
/* intialize matrix start values */
Newx(scores, ( (x + 2) * (y + 2)), UV);
scores[0] = score_ceil;
scores[1 * (y + 2) + 0] = score_ceil;
scores[0 * (y + 2) + 1] = score_ceil;
scores[1 * (y + 2) + 1] = 0;
head = uniquePush(uniquePush(head, src[0]), tgt[0]);
/* work loops */
/* i = src index */
/* j = tgt index */
for (i=1;i<=x;i++) {
if (i < x)
head = uniquePush(head, src[i]);
scores[(i+1) * (y + 2) + 1] = i;
scores[(i+1) * (y + 2) + 0] = score_ceil;
swapCount = 0;
for (j=1;j<=y;j++) {
if (i == 1) {
if(j < y)
head = uniquePush(head, tgt[j]);
scores[1 * (y + 2) + (j + 1)] = j;
scores[0 * (y + 2) + (j + 1)] = score_ceil;
}
targetCharCount = find(head, tgt[j-1])->value;
swapScore = scores[targetCharCount * (y + 2) + swapCount] + i - targetCharCount - 1 + j - swapCount;
if (src[i-1] != tgt[j-1]){
scores[(i+1) * (y + 2) + (j + 1)] = MIN(swapScore,(MIN(scores[i * (y + 2) + j], MIN(scores[(i+1) * (y + 2) + j], scores[i * (y + 2) + (j + 1)])) + 1));
}
else {
swapCount = j;
scores[(i+1) * (y + 2) + (j + 1)] = MIN(scores[i * (y + 2) + j], swapScore);
}
}
find(head, src[i-1])->value = i;
}
{
IV score = scores[(x+1) * (y + 2) + (y + 1)];
dict_free(head);
Safefree(scores);
return (maxDistance != 0 && maxDistance < score)?(-1):score;
}
}
/* END of edit_distance() stuff
* ========================================================= */
/* is c a control character for which we have a mnemonic? */
#define isMNEMONIC_CNTRL(c) _IS_MNEMONIC_CNTRL_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
STATIC const char *
S_cntrl_to_mnemonic(const U8 c)
{
/* Returns the mnemonic string that represents character 'c', if one
* exists; NULL otherwise. The only ones that exist for the purposes of
* this routine are a few control characters */
switch (c) {
case '\a': return "\\a";
case '\b': return "\\b";
case ESC_NATIVE: return "\\e";
case '\f': return "\\f";
case '\n': return "\\n";
case '\r': return "\\r";
case '\t': return "\\t";
}
return NULL;
}
/* Mark that we cannot extend a found fixed substring at this point.
Update the longest found anchored substring or the longest found
floating substrings if needed. */
STATIC void
S_scan_commit(pTHX_ const RExC_state_t *pRExC_state, scan_data_t *data,
SSize_t *minlenp, int is_inf)
{
const STRLEN l = CHR_SVLEN(data->last_found);
SV * const longest_sv = data->substrs[data->cur_is_floating].str;
const STRLEN old_l = CHR_SVLEN(longest_sv);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_SCAN_COMMIT;
if ((l >= old_l) && ((l > old_l) || (data->flags & SF_BEFORE_EOL))) {
const U8 i = data->cur_is_floating;
SvSetMagicSV(longest_sv, data->last_found);
data->substrs[i].min_offset = l ? data->last_start_min : data->pos_min;
if (!i) /* fixed */
data->substrs[0].max_offset = data->substrs[0].min_offset;
else { /* float */
data->substrs[1].max_offset = (l
? data->last_start_max
: (data->pos_delta > SSize_t_MAX - data->pos_min
? SSize_t_MAX
: data->pos_min + data->pos_delta));
if (is_inf
|| (STRLEN)data->substrs[1].max_offset > (STRLEN)SSize_t_MAX)
data->substrs[1].max_offset = SSize_t_MAX;
}
if (data->flags & SF_BEFORE_EOL)
data->substrs[i].flags |= (data->flags & SF_BEFORE_EOL);
else
data->substrs[i].flags &= ~SF_BEFORE_EOL;
data->substrs[i].minlenp = minlenp;
data->substrs[i].lookbehind = 0;
}
SvCUR_set(data->last_found, 0);
{
SV * const sv = data->last_found;
if (SvUTF8(sv) && SvMAGICAL(sv)) {
MAGIC * const mg = mg_find(sv, PERL_MAGIC_utf8);
if (mg)
mg->mg_len = 0;
}
}
data->last_end = -1;
data->flags &= ~SF_BEFORE_EOL;
DEBUG_STUDYDATA("commit", data, 0, is_inf);
}
/* An SSC is just a regnode_charclass_posix with an extra field: the inversion
* list that describes which code points it matches */
STATIC void
S_ssc_anything(pTHX_ regnode_ssc *ssc)
{
/* Set the SSC 'ssc' to match an empty string or any code point */
PERL_ARGS_ASSERT_SSC_ANYTHING;
assert(is_ANYOF_SYNTHETIC(ssc));
/* mortalize so won't leak */
ssc->invlist = sv_2mortal(_add_range_to_invlist(NULL, 0, UV_MAX));
ANYOF_FLAGS(ssc) |= SSC_MATCHES_EMPTY_STRING; /* Plus matches empty */
}
STATIC int
S_ssc_is_anything(const regnode_ssc *ssc)
{
/* Returns TRUE if the SSC 'ssc' can match the empty string and any code
* point; FALSE otherwise. Thus, this is used to see if using 'ssc' buys
* us anything: if the function returns TRUE, 'ssc' hasn't been restricted
* in any way, so there's no point in using it */
UV start, end;
bool ret;
PERL_ARGS_ASSERT_SSC_IS_ANYTHING;
assert(is_ANYOF_SYNTHETIC(ssc));
if (! (ANYOF_FLAGS(ssc) & SSC_MATCHES_EMPTY_STRING)) {
return FALSE;
}
/* See if the list consists solely of the range 0 - Infinity */
invlist_iterinit(ssc->invlist);
ret = invlist_iternext(ssc->invlist, &start, &end)
&& start == 0
&& end == UV_MAX;
invlist_iterfinish(ssc->invlist);
if (ret) {
return TRUE;
}
/* If e.g., both \w and \W are set, matches everything */
if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
int i;
for (i = 0; i < ANYOF_POSIXL_MAX; i += 2) {
if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i+1)) {
return TRUE;
}
}
}
return FALSE;
}
STATIC void
S_ssc_init(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc)
{
/* Initializes the SSC 'ssc'. This includes setting it to match an empty
* string, any code point, or any posix class under locale */
PERL_ARGS_ASSERT_SSC_INIT;
Zero(ssc, 1, regnode_ssc);
set_ANYOF_SYNTHETIC(ssc);
ARG_SET(ssc, ANYOF_ONLY_HAS_BITMAP);
ssc_anything(ssc);
/* If any portion of the regex is to operate under locale rules that aren't
* fully known at compile time, initialization includes it. The reason
* this isn't done for all regexes is that the optimizer was written under
* the assumption that locale was all-or-nothing. Given the complexity and
* lack of documentation in the optimizer, and that there are inadequate
* test cases for locale, many parts of it may not work properly, it is
* safest to avoid locale unless necessary. */
if (RExC_contains_locale) {
ANYOF_POSIXL_SETALL(ssc);
}
else {
ANYOF_POSIXL_ZERO(ssc);
}
}
STATIC int
S_ssc_is_cp_posixl_init(const RExC_state_t *pRExC_state,
const regnode_ssc *ssc)
{
/* Returns TRUE if the SSC 'ssc' is in its initial state with regard only
* to the list of code points matched, and locale posix classes; hence does
* not check its flags) */
UV start, end;
bool ret;
PERL_ARGS_ASSERT_SSC_IS_CP_POSIXL_INIT;
assert(is_ANYOF_SYNTHETIC(ssc));
invlist_iterinit(ssc->invlist);
ret = invlist_iternext(ssc->invlist, &start, &end)
&& start == 0
&& end == UV_MAX;
invlist_iterfinish(ssc->invlist);
if (! ret) {
return FALSE;
}
if (RExC_contains_locale && ! ANYOF_POSIXL_SSC_TEST_ALL_SET(ssc)) {
return FALSE;
}
return TRUE;
}
#define INVLIST_INDEX 0
#define ONLY_LOCALE_MATCHES_INDEX 1
#define DEFERRED_USER_DEFINED_INDEX 2
STATIC SV*
S_get_ANYOF_cp_list_for_ssc(pTHX_ const RExC_state_t *pRExC_state,
const regnode_charclass* const node)
{
/* Returns a mortal inversion list defining which code points are matched
* by 'node', which is of type ANYOF. Handles complementing the result if
* appropriate. If some code points aren't knowable at this time, the
* returned list must, and will, contain every code point that is a
* possibility. */
dVAR;
SV* invlist = NULL;
SV* only_utf8_locale_invlist = NULL;
unsigned int i;
const U32 n = ARG(node);
bool new_node_has_latin1 = FALSE;
const U8 flags = OP(node) == ANYOFH ? 0 : ANYOF_FLAGS(node);
PERL_ARGS_ASSERT_GET_ANYOF_CP_LIST_FOR_SSC;
/* Look at the data structure created by S_set_ANYOF_arg() */
if (n != ANYOF_ONLY_HAS_BITMAP) {
SV * const rv = MUTABLE_SV(RExC_rxi->data->data[n]);
AV * const av = MUTABLE_AV(SvRV(rv));
SV **const ary = AvARRAY(av);
assert(RExC_rxi->data->what[n] == 's');
if (av_tindex_skip_len_mg(av) >= DEFERRED_USER_DEFINED_INDEX) {
/* Here there are things that won't be known until runtime -- we
* have to assume it could be anything */
invlist = sv_2mortal(_new_invlist(1));
return _add_range_to_invlist(invlist, 0, UV_MAX);
}
else if (ary[INVLIST_INDEX]) {
/* Use the node's inversion list */
invlist = sv_2mortal(invlist_clone(ary[INVLIST_INDEX], NULL));
}
/* Get the code points valid only under UTF-8 locales */
if ( (flags & ANYOFL_FOLD)
&& av_tindex_skip_len_mg(av) >= ONLY_LOCALE_MATCHES_INDEX)
{
only_utf8_locale_invlist = ary[ONLY_LOCALE_MATCHES_INDEX];
}
}
if (! invlist) {
invlist = sv_2mortal(_new_invlist(0));
}
/* An ANYOF node contains a bitmap for the first NUM_ANYOF_CODE_POINTS
* code points, and an inversion list for the others, but if there are code
* points that should match only conditionally on the target string being
* UTF-8, those are placed in the inversion list, and not the bitmap.
* Since there are circumstances under which they could match, they are
* included in the SSC. But if the ANYOF node is to be inverted, we have
* to exclude them here, so that when we invert below, the end result
* actually does include them. (Think about "\xe0" =~ /[^\xc0]/di;). We
* have to do this here before we add the unconditionally matched code
* points */
if (flags & ANYOF_INVERT) {
_invlist_intersection_complement_2nd(invlist,
PL_UpperLatin1,
&invlist);
}
/* Add in the points from the bit map */
if (OP(node) != ANYOFH) {
for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) {
if (ANYOF_BITMAP_TEST(node, i)) {
unsigned int start = i++;
for (; i < NUM_ANYOF_CODE_POINTS
&& ANYOF_BITMAP_TEST(node, i); ++i)
{
/* empty */
}
invlist = _add_range_to_invlist(invlist, start, i-1);
new_node_has_latin1 = TRUE;
}
}
}
/* If this can match all upper Latin1 code points, have to add them
* as well. But don't add them if inverting, as when that gets done below,
* it would exclude all these characters, including the ones it shouldn't
* that were added just above */
if (! (flags & ANYOF_INVERT) && OP(node) == ANYOFD
&& (flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER))
{
_invlist_union(invlist, PL_UpperLatin1, &invlist);
}
/* Similarly for these */
if (flags & ANYOF_MATCHES_ALL_ABOVE_BITMAP) {
_invlist_union_complement_2nd(invlist, PL_InBitmap, &invlist);
}
if (flags & ANYOF_INVERT) {
_invlist_invert(invlist);
}
else if (flags & ANYOFL_FOLD) {
if (new_node_has_latin1) {
/* Under /li, any 0-255 could fold to any other 0-255, depending on
* the locale. We can skip this if there are no 0-255 at all. */
_invlist_union(invlist, PL_Latin1, &invlist);
invlist = add_cp_to_invlist(invlist, LATIN_SMALL_LETTER_DOTLESS_I);
invlist = add_cp_to_invlist(invlist, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE);
}
else {
if (_invlist_contains_cp(invlist, LATIN_SMALL_LETTER_DOTLESS_I)) {
invlist = add_cp_to_invlist(invlist, 'I');
}
if (_invlist_contains_cp(invlist,
LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE))
{
invlist = add_cp_to_invlist(invlist, 'i');
}
}
}
/* Similarly add the UTF-8 locale possible matches. These have to be
* deferred until after the non-UTF-8 locale ones are taken care of just
* above, or it leads to wrong results under ANYOF_INVERT */
if (only_utf8_locale_invlist) {
_invlist_union_maybe_complement_2nd(invlist,
only_utf8_locale_invlist,
flags & ANYOF_INVERT,
&invlist);
}
return invlist;
}
/* These two functions currently do the exact same thing */
#define ssc_init_zero ssc_init
#define ssc_add_cp(ssc, cp) ssc_add_range((ssc), (cp), (cp))
#define ssc_match_all_cp(ssc) ssc_add_range(ssc, 0, UV_MAX)
/* 'AND' a given class with another one. Can create false positives. 'ssc'
* should not be inverted. 'and_with->flags & ANYOF_MATCHES_POSIXL' should be
* 0 if 'and_with' is a regnode_charclass instead of a regnode_ssc. */
STATIC void
S_ssc_and(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc,
const regnode_charclass *and_with)
{
/* Accumulate into SSC 'ssc' its 'AND' with 'and_with', which is either
* another SSC or a regular ANYOF class. Can create false positives. */
SV* anded_cp_list;
U8 and_with_flags = (OP(and_with) == ANYOFH) ? 0 : ANYOF_FLAGS(and_with);
U8 anded_flags;
PERL_ARGS_ASSERT_SSC_AND;
assert(is_ANYOF_SYNTHETIC(ssc));
/* 'and_with' is used as-is if it too is an SSC; otherwise have to extract
* the code point inversion list and just the relevant flags */
if (is_ANYOF_SYNTHETIC(and_with)) {
anded_cp_list = ((regnode_ssc *)and_with)->invlist;
anded_flags = and_with_flags;
/* XXX This is a kludge around what appears to be deficiencies in the
* optimizer. If we make S_ssc_anything() add in the WARN_SUPER flag,
* there are paths through the optimizer where it doesn't get weeded
* out when it should. And if we don't make some extra provision for
* it like the code just below, it doesn't get added when it should.
* This solution is to add it only when AND'ing, which is here, and
* only when what is being AND'ed is the pristine, original node
* matching anything. Thus it is like adding it to ssc_anything() but
* only when the result is to be AND'ed. Probably the same solution
* could be adopted for the same problem we have with /l matching,
* which is solved differently in S_ssc_init(), and that would lead to
* fewer false positives than that solution has. But if this solution
* creates bugs, the consequences are only that a warning isn't raised
* that should be; while the consequences for having /l bugs is
* incorrect matches */
if (ssc_is_anything((regnode_ssc *)and_with)) {
anded_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
}
}
else {
anded_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, and_with);
if (OP(and_with) == ANYOFD) {
anded_flags = and_with_flags & ANYOF_COMMON_FLAGS;
}
else {
anded_flags = and_with_flags
&( ANYOF_COMMON_FLAGS
|ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP);
if (ANYOFL_UTF8_LOCALE_REQD(and_with_flags)) {
anded_flags &=
ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
}
}
ANYOF_FLAGS(ssc) &= anded_flags;
/* Below, C1 is the list of code points in 'ssc'; P1, its posix classes.
* C2 is the list of code points in 'and-with'; P2, its posix classes.
* 'and_with' may be inverted. When not inverted, we have the situation of
* computing:
* (C1 | P1) & (C2 | P2)
* = (C1 & (C2 | P2)) | (P1 & (C2 | P2))
* = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2))
* <= ((C1 & C2) | P2)) | ( P1 | (P1 & P2))
* <= ((C1 & C2) | P1 | P2)
* Alternatively, the last few steps could be:
* = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2))
* <= ((C1 & C2) | C1 ) | ( C2 | (P1 & P2))
* <= (C1 | C2 | (P1 & P2))
* We favor the second approach if either P1 or P2 is non-empty. This is
* because these components are a barrier to doing optimizations, as what
* they match cannot be known until the moment of matching as they are
* dependent on the current locale, 'AND"ing them likely will reduce or
* eliminate them.
* But we can do better if we know that C1,P1 are in their initial state (a
* frequent occurrence), each matching everything:
* (<everything>) & (C2 | P2) = C2 | P2
* Similarly, if C2,P2 are in their initial state (again a frequent
* occurrence), the result is a no-op
* (C1 | P1) & (<everything>) = C1 | P1
*
* Inverted, we have
* (C1 | P1) & ~(C2 | P2) = (C1 | P1) & (~C2 & ~P2)
* = (C1 & (~C2 & ~P2)) | (P1 & (~C2 & ~P2))
* <= (C1 & ~C2) | (P1 & ~P2)
* */
if ((and_with_flags & ANYOF_INVERT)
&& ! is_ANYOF_SYNTHETIC(and_with))
{
unsigned int i;
ssc_intersection(ssc,
anded_cp_list,
FALSE /* Has already been inverted */
);
/* If either P1 or P2 is empty, the intersection will be also; can skip
* the loop */
if (! (and_with_flags & ANYOF_MATCHES_POSIXL)) {
ANYOF_POSIXL_ZERO(ssc);
}
else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
/* Note that the Posix class component P from 'and_with' actually
* looks like:
* P = Pa | Pb | ... | Pn
* where each component is one posix class, such as in [\w\s].
* Thus
* ~P = ~(Pa | Pb | ... | Pn)
* = ~Pa & ~Pb & ... & ~Pn
* <= ~Pa | ~Pb | ... | ~Pn
* The last is something we can easily calculate, but unfortunately
* is likely to have many false positives. We could do better
* in some (but certainly not all) instances if two classes in
* P have known relationships. For example
* :lower: <= :alpha: <= :alnum: <= \w <= :graph: <= :print:
* So
* :lower: & :print: = :lower:
* And similarly for classes that must be disjoint. For example,
* since \s and \w can have no elements in common based on rules in
* the POSIX standard,
* \w & ^\S = nothing
* Unfortunately, some vendor locales do not meet the Posix
* standard, in particular almost everything by Microsoft.
* The loop below just changes e.g., \w into \W and vice versa */
regnode_charclass_posixl temp;
int add = 1; /* To calculate the index of the complement */
Zero(&temp, 1, regnode_charclass_posixl);
ANYOF_POSIXL_ZERO(&temp);
for (i = 0; i < ANYOF_MAX; i++) {
assert(i % 2 != 0
|| ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)
|| ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i + 1));
if (ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)) {
ANYOF_POSIXL_SET(&temp, i + add);
}
add = 0 - add; /* 1 goes to -1; -1 goes to 1 */
}
ANYOF_POSIXL_AND(&temp, ssc);
} /* else ssc already has no posixes */
} /* else: Not inverted. This routine is a no-op if 'and_with' is an SSC
in its initial state */
else if (! is_ANYOF_SYNTHETIC(and_with)
|| ! ssc_is_cp_posixl_init(pRExC_state, (regnode_ssc *)and_with))
{
/* But if 'ssc' is in its initial state, the result is just 'and_with';
* copy it over 'ssc' */
if (ssc_is_cp_posixl_init(pRExC_state, ssc)) {
if (is_ANYOF_SYNTHETIC(and_with)) {
StructCopy(and_with, ssc, regnode_ssc);
}
else {
ssc->invlist = anded_cp_list;
ANYOF_POSIXL_ZERO(ssc);
if (and_with_flags & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_OR((regnode_charclass_posixl*) and_with, ssc);
}
}
}
else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)
|| (and_with_flags & ANYOF_MATCHES_POSIXL))
{
/* One or the other of P1, P2 is non-empty. */
if (and_with_flags & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_AND((regnode_charclass_posixl*) and_with, ssc);
}
ssc_union(ssc, anded_cp_list, FALSE);
}
else { /* P1 = P2 = empty */
ssc_intersection(ssc, anded_cp_list, FALSE);
}
}
}
STATIC void
S_ssc_or(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc,
const regnode_charclass *or_with)
{
/* Accumulate into SSC 'ssc' its 'OR' with 'or_with', which is either
* another SSC or a regular ANYOF class. Can create false positives if
* 'or_with' is to be inverted. */
SV* ored_cp_list;
U8 ored_flags;
U8 or_with_flags = (OP(or_with) == ANYOFH) ? 0 : ANYOF_FLAGS(or_with);
PERL_ARGS_ASSERT_SSC_OR;
assert(is_ANYOF_SYNTHETIC(ssc));
/* 'or_with' is used as-is if it too is an SSC; otherwise have to extract
* the code point inversion list and just the relevant flags */
if (is_ANYOF_SYNTHETIC(or_with)) {
ored_cp_list = ((regnode_ssc*) or_with)->invlist;
ored_flags = or_with_flags;
}
else {
ored_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, or_with);
ored_flags = or_with_flags & ANYOF_COMMON_FLAGS;
if (OP(or_with) != ANYOFD) {
ored_flags
|= or_with_flags
& ( ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP);
if (ANYOFL_UTF8_LOCALE_REQD(or_with_flags)) {
ored_flags |=
ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
}
}
ANYOF_FLAGS(ssc) |= ored_flags;
/* Below, C1 is the list of code points in 'ssc'; P1, its posix classes.
* C2 is the list of code points in 'or-with'; P2, its posix classes.
* 'or_with' may be inverted. When not inverted, we have the simple
* situation of computing:
* (C1 | P1) | (C2 | P2) = (C1 | C2) | (P1 | P2)
* If P1|P2 yields a situation with both a class and its complement are
* set, like having both \w and \W, this matches all code points, and we
* can delete these from the P component of the ssc going forward. XXX We
* might be able to delete all the P components, but I (khw) am not certain
* about this, and it is better to be safe.
*
* Inverted, we have
* (C1 | P1) | ~(C2 | P2) = (C1 | P1) | (~C2 & ~P2)
* <= (C1 | P1) | ~C2
* <= (C1 | ~C2) | P1
* (which results in actually simpler code than the non-inverted case)
* */
if ((or_with_flags & ANYOF_INVERT)
&& ! is_ANYOF_SYNTHETIC(or_with))
{
/* We ignore P2, leaving P1 going forward */
} /* else Not inverted */
else if (or_with_flags & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_OR((regnode_charclass_posixl*)or_with, ssc);
if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
unsigned int i;
for (i = 0; i < ANYOF_MAX; i += 2) {
if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i + 1))
{
ssc_match_all_cp(ssc);
ANYOF_POSIXL_CLEAR(ssc, i);
ANYOF_POSIXL_CLEAR(ssc, i+1);
}
}
}
}
ssc_union(ssc,
ored_cp_list,
FALSE /* Already has been inverted */
);
}
PERL_STATIC_INLINE void
S_ssc_union(pTHX_ regnode_ssc *ssc, SV* const invlist, const bool invert2nd)
{
PERL_ARGS_ASSERT_SSC_UNION;
assert(is_ANYOF_SYNTHETIC(ssc));
_invlist_union_maybe_complement_2nd(ssc->invlist,
invlist,
invert2nd,
&ssc->invlist);
}
PERL_STATIC_INLINE void
S_ssc_intersection(pTHX_ regnode_ssc *ssc,
SV* const invlist,
const bool invert2nd)
{
PERL_ARGS_ASSERT_SSC_INTERSECTION;
assert(is_ANYOF_SYNTHETIC(ssc));
_invlist_intersection_maybe_complement_2nd(ssc->invlist,
invlist,
invert2nd,
&ssc->invlist);
}
PERL_STATIC_INLINE void
S_ssc_add_range(pTHX_ regnode_ssc *ssc, const UV start, const UV end)
{
PERL_ARGS_ASSERT_SSC_ADD_RANGE;
assert(is_ANYOF_SYNTHETIC(ssc));
ssc->invlist = _add_range_to_invlist(ssc->invlist, start, end);
}
PERL_STATIC_INLINE void
S_ssc_cp_and(pTHX_ regnode_ssc *ssc, const UV cp)
{
/* AND just the single code point 'cp' into the SSC 'ssc' */
SV* cp_list = _new_invlist(2);
PERL_ARGS_ASSERT_SSC_CP_AND;
assert(is_ANYOF_SYNTHETIC(ssc));
cp_list = add_cp_to_invlist(cp_list, cp);
ssc_intersection(ssc, cp_list,
FALSE /* Not inverted */
);
SvREFCNT_dec_NN(cp_list);
}
PERL_STATIC_INLINE void
S_ssc_clear_locale(regnode_ssc *ssc)
{
/* Set the SSC 'ssc' to not match any locale things */
PERL_ARGS_ASSERT_SSC_CLEAR_LOCALE;
assert(is_ANYOF_SYNTHETIC(ssc));
ANYOF_POSIXL_ZERO(ssc);
ANYOF_FLAGS(ssc) &= ~ANYOF_LOCALE_FLAGS;
}
#define NON_OTHER_COUNT NON_OTHER_COUNT_FOR_USE_ONLY_BY_REGCOMP_DOT_C
STATIC bool
S_is_ssc_worth_it(const RExC_state_t * pRExC_state, const regnode_ssc * ssc)
{
/* The synthetic start class is used to hopefully quickly winnow down
* places where a pattern could start a match in the target string. If it
* doesn't really narrow things down that much, there isn't much point to
* having the overhead of using it. This function uses some very crude
* heuristics to decide if to use the ssc or not.
*
* It returns TRUE if 'ssc' rules out more than half what it considers to
* be the "likely" possible matches, but of course it doesn't know what the
* actual things being matched are going to be; these are only guesses
*
* For /l matches, it assumes that the only likely matches are going to be
* in the 0-255 range, uniformly distributed, so half of that is 127
* For /a and /d matches, it assumes that the likely matches will be just
* the ASCII range, so half of that is 63
* For /u and there isn't anything matching above the Latin1 range, it
* assumes that that is the only range likely to be matched, and uses
* half that as the cut-off: 127. If anything matches above Latin1,
* it assumes that all of Unicode could match (uniformly), except for
* non-Unicode code points and things in the General Category "Other"
* (unassigned, private use, surrogates, controls and formats). This
* is a much large number. */
U32 count = 0; /* Running total of number of code points matched by
'ssc' */
UV start, end; /* Start and end points of current range in inversion
XXX outdated. UTF-8 locales are common, what about invert? list */
const U32 max_code_points = (LOC)
? 256
: (( ! UNI_SEMANTICS
|| invlist_highest(ssc->invlist) < 256)
? 128
: NON_OTHER_COUNT);
const U32 max_match = max_code_points / 2;
PERL_ARGS_ASSERT_IS_SSC_WORTH_IT;
invlist_iterinit(ssc->invlist);
while (invlist_iternext(ssc->invlist, &start, &end)) {
if (start >= max_code_points) {
break;
}
end = MIN(end, max_code_points - 1);
count += end - start + 1;
if (count >= max_match) {
invlist_iterfinish(ssc->invlist);
return FALSE;
}
}
return TRUE;
}
STATIC void
S_ssc_finalize(pTHX_ RExC_state_t *pRExC_state, regnode_ssc *ssc)
{
/* The inversion list in the SSC is marked mortal; now we need a more
* permanent copy, which is stored the same way that is done in a regular
* ANYOF node, with the first NUM_ANYOF_CODE_POINTS code points in a bit
* map */
SV* invlist = invlist_clone(ssc->invlist, NULL);
PERL_ARGS_ASSERT_SSC_FINALIZE;
assert(is_ANYOF_SYNTHETIC(ssc));
/* The code in this file assumes that all but these flags aren't relevant
* to the SSC, except SSC_MATCHES_EMPTY_STRING, which should be cleared
* by the time we reach here */
assert(! (ANYOF_FLAGS(ssc)
& ~( ANYOF_COMMON_FLAGS
|ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)));
populate_ANYOF_from_invlist( (regnode *) ssc, &invlist);
set_ANYOF_arg(pRExC_state, (regnode *) ssc, invlist, NULL, NULL);
/* Make sure is clone-safe */
ssc->invlist = NULL;
if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
ANYOF_FLAGS(ssc) |= ANYOF_MATCHES_POSIXL;
OP(ssc) = ANYOFPOSIXL;
}
else if (RExC_contains_locale) {
OP(ssc) = ANYOFL;
}
assert(! (ANYOF_FLAGS(ssc) & ANYOF_LOCALE_FLAGS) || RExC_contains_locale);
}
#define TRIE_LIST_ITEM(state,idx) (trie->states[state].trans.list)[ idx ]
#define TRIE_LIST_CUR(state) ( TRIE_LIST_ITEM( state, 0 ).forid )
#define TRIE_LIST_LEN(state) ( TRIE_LIST_ITEM( state, 0 ).newstate )
#define TRIE_LIST_USED(idx) ( trie->states[state].trans.list \
? (TRIE_LIST_CUR( idx ) - 1) \
: 0 )
#ifdef DEBUGGING
/*
dump_trie(trie,widecharmap,revcharmap)
dump_trie_interim_list(trie,widecharmap,revcharmap,next_alloc)
dump_trie_interim_table(trie,widecharmap,revcharmap,next_alloc)
These routines dump out a trie in a somewhat readable format.
The _interim_ variants are used for debugging the interim
tables that are used to generate the final compressed
representation which is what dump_trie expects.
Part of the reason for their existence is to provide a form
of documentation as to how the different representations function.
*/
/*
Dumps the final compressed table form of the trie to Perl_debug_log.
Used for debugging make_trie().
*/
STATIC void
S_dump_trie(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap,
AV *revcharmap, U32 depth)
{
U32 state;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
U16 word;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE;
Perl_re_indentf( aTHX_ "Char : %-6s%-6s%-4s ",
depth+1, "Match","Base","Ofs" );
for( state = 0 ; state < trie->uniquecharcount ; state++ ) {
SV ** const tmp = av_fetch( revcharmap, state, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
}
}
Perl_re_printf( aTHX_ "\n");
Perl_re_indentf( aTHX_ "State|-----------------------", depth+1);
for( state = 0 ; state < trie->uniquecharcount ; state++ )
Perl_re_printf( aTHX_ "%.*s", colwidth, "--------");
Perl_re_printf( aTHX_ "\n");
for( state = 1 ; state < trie->statecount ; state++ ) {
const U32 base = trie->states[ state ].trans.base;
Perl_re_indentf( aTHX_ "#%4" UVXf "|", depth+1, (UV)state);
if ( trie->states[ state ].wordnum ) {
Perl_re_printf( aTHX_ " W%4X", trie->states[ state ].wordnum );
} else {
Perl_re_printf( aTHX_ "%6s", "" );
}
Perl_re_printf( aTHX_ " @%4" UVXf " ", (UV)base );
if ( base ) {
U32 ofs = 0;
while( ( base + ofs < trie->uniquecharcount ) ||
( base + ofs - trie->uniquecharcount < trie->lasttrans
&& trie->trans[ base + ofs - trie->uniquecharcount ].check
!= state))
ofs++;
Perl_re_printf( aTHX_ "+%2" UVXf "[ ", (UV)ofs);
for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) {
if ( ( base + ofs >= trie->uniquecharcount )
&& ( base + ofs - trie->uniquecharcount
< trie->lasttrans )
&& trie->trans[ base + ofs
- trie->uniquecharcount ].check == state )
{
Perl_re_printf( aTHX_ "%*" UVXf, colwidth,
(UV)trie->trans[ base + ofs - trie->uniquecharcount ].next
);
} else {
Perl_re_printf( aTHX_ "%*s", colwidth," ." );
}
}
Perl_re_printf( aTHX_ "]");
}
Perl_re_printf( aTHX_ "\n" );
}
Perl_re_indentf( aTHX_ "word_info N:(prev,len)=",
depth);
for (word=1; word <= trie->wordcount; word++) {
Perl_re_printf( aTHX_ " %d:(%d,%d)",
(int)word, (int)(trie->wordinfo[word].prev),
(int)(trie->wordinfo[word].len));
}
Perl_re_printf( aTHX_ "\n" );
}
/*
Dumps a fully constructed but uncompressed trie in list form.
List tries normally only are used for construction when the number of
possible chars (trie->uniquecharcount) is very high.
Used for debugging make_trie().
*/
STATIC void
S_dump_trie_interim_list(pTHX_ const struct _reg_trie_data *trie,
HV *widecharmap, AV *revcharmap, U32 next_alloc,
U32 depth)
{
U32 state;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_LIST;
/* print out the table precompression. */
Perl_re_indentf( aTHX_ "State :Word | Transition Data\n",
depth+1 );
Perl_re_indentf( aTHX_ "%s",
depth+1, "------:-----+-----------------\n" );
for( state=1 ; state < next_alloc ; state ++ ) {
U16 charid;
Perl_re_indentf( aTHX_ " %4" UVXf " :",
depth+1, (UV)state );
if ( ! trie->states[ state ].wordnum ) {
Perl_re_printf( aTHX_ "%5s| ","");
} else {
Perl_re_printf( aTHX_ "W%4x| ",
trie->states[ state ].wordnum
);
}
for( charid = 1 ; charid <= TRIE_LIST_USED( state ) ; charid++ ) {
SV ** const tmp = av_fetch( revcharmap,
TRIE_LIST_ITEM(state, charid).forid, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s:%3X=%4" UVXf " | ",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp),
colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0)
| PERL_PV_ESCAPE_FIRSTCHAR
) ,
TRIE_LIST_ITEM(state, charid).forid,
(UV)TRIE_LIST_ITEM(state, charid).newstate
);
if (!(charid % 10))
Perl_re_printf( aTHX_ "\n%*s| ",
(int)((depth * 2) + 14), "");
}
}
Perl_re_printf( aTHX_ "\n");
}
}
/*
Dumps a fully constructed but uncompressed trie in table form.
This is the normal DFA style state transition table, with a few
twists to facilitate compression later.
Used for debugging make_trie().
*/
STATIC void
S_dump_trie_interim_table(pTHX_ const struct _reg_trie_data *trie,
HV *widecharmap, AV *revcharmap, U32 next_alloc,
U32 depth)
{
U32 state;
U16 charid;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_TABLE;
/*
print out the table precompression so that we can do a visual check
that they are identical.
*/
Perl_re_indentf( aTHX_ "Char : ", depth+1 );
for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) {
SV ** const tmp = av_fetch( revcharmap, charid, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
}
}
Perl_re_printf( aTHX_ "\n");
Perl_re_indentf( aTHX_ "State+-", depth+1 );
for( charid=0 ; charid < trie->uniquecharcount ; charid++ ) {
Perl_re_printf( aTHX_ "%.*s", colwidth,"--------");
}
Perl_re_printf( aTHX_ "\n" );
for( state=1 ; state < next_alloc ; state += trie->uniquecharcount ) {
Perl_re_indentf( aTHX_ "%4" UVXf " : ",
depth+1,
(UV)TRIE_NODENUM( state ) );
for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) {
UV v=(UV)SAFE_TRIE_NODENUM( trie->trans[ state + charid ].next );
if (v)
Perl_re_printf( aTHX_ "%*" UVXf, colwidth, v );
else
Perl_re_printf( aTHX_ "%*s", colwidth, "." );
}
if ( ! trie->states[ TRIE_NODENUM( state ) ].wordnum ) {
Perl_re_printf( aTHX_ " (%4" UVXf ")\n",
(UV)trie->trans[ state ].check );
} else {
Perl_re_printf( aTHX_ " (%4" UVXf ") W%4X\n",
(UV)trie->trans[ state ].check,
trie->states[ TRIE_NODENUM( state ) ].wordnum );
}
}
}
#endif
/* make_trie(startbranch,first,last,tail,word_count,flags,depth)
startbranch: the first branch in the whole branch sequence
first : start branch of sequence of branch-exact nodes.
May be the same as startbranch
last : Thing following the last branch.
May be the same as tail.
tail : item following the branch sequence
count : words in the sequence
flags : currently the OP() type we will be building one of /EXACT(|F|FA|FU|FU_SS|L|FLU8)/
depth : indent depth
Inplace optimizes a sequence of 2 or more Branch-Exact nodes into a TRIE node.
A trie is an N'ary tree where the branches are determined by digital
decomposition of the key. IE, at the root node you look up the 1st character and
follow that branch repeat until you find the end of the branches. Nodes can be
marked as "accepting" meaning they represent a complete word. Eg:
/he|she|his|hers/
would convert into the following structure. Numbers represent states, letters
following numbers represent valid transitions on the letter from that state, if
the number is in square brackets it represents an accepting state, otherwise it
will be in parenthesis.
+-h->+-e->[3]-+-r->(8)-+-s->[9]
| |
| (2)
| |
(1) +-i->(6)-+-s->[7]
|
+-s->(3)-+-h->(4)-+-e->[5]
Accept Word Mapping: 3=>1 (he),5=>2 (she), 7=>3 (his), 9=>4 (hers)
This shows that when matching against the string 'hers' we will begin at state 1
read 'h' and move to state 2, read 'e' and move to state 3 which is accepting,
then read 'r' and go to state 8 followed by 's' which takes us to state 9 which
is also accepting. Thus we know that we can match both 'he' and 'hers' with a
single traverse. We store a mapping from accepting to state to which word was
matched, and then when we have multiple possibilities we try to complete the
rest of the regex in the order in which they occurred in the alternation.
The only prior NFA like behaviour that would be changed by the TRIE support is
the silent ignoring of duplicate alternations which are of the form:
/ (DUPE|DUPE) X? (?{ ... }) Y /x
Thus EVAL blocks following a trie may be called a different number of times with
and without the optimisation. With the optimisations dupes will be silently
ignored. This inconsistent behaviour of EVAL type nodes is well established as
the following demonstrates:
'words'=~/(word|word|word)(?{ print $1 })[xyz]/
which prints out 'word' three times, but
'words'=~/(word|word|word)(?{ print $1 })S/
which doesnt print it out at all. This is due to other optimisations kicking in.
Example of what happens on a structural level:
The regexp /(ac|ad|ab)+/ will produce the following debug output:
1: CURLYM[1] {1,32767}(18)
5: BRANCH(8)
6: EXACT <ac>(16)
8: BRANCH(11)
9: EXACT <ad>(16)
11: BRANCH(14)
12: EXACT <ab>(16)
16: SUCCEED(0)
17: NOTHING(18)
18: END(0)
This would be optimizable with startbranch=5, first=5, last=16, tail=16
and should turn into:
1: CURLYM[1] {1,32767}(18)
5: TRIE(16)
[Words:3 Chars Stored:6 Unique Chars:4 States:5 NCP:1]
<ac>
<ad>
<ab>
16: SUCCEED(0)
17: NOTHING(18)
18: END(0)
Cases where tail != last would be like /(?foo|bar)baz/:
1: BRANCH(4)
2: EXACT <foo>(8)
4: BRANCH(7)
5: EXACT <bar>(8)
7: TAIL(8)
8: EXACT <baz>(10)
10: END(0)
which would be optimizable with startbranch=1, first=1, last=7, tail=8
and would end up looking like:
1: TRIE(8)
[Words:2 Chars Stored:6 Unique Chars:5 States:7 NCP:1]
<foo>
<bar>
7: TAIL(8)
8: EXACT <baz>(10)
10: END(0)
d = uvchr_to_utf8_flags(d, uv, 0);
is the recommended Unicode-aware way of saying
*(d++) = uv;
*/
#define TRIE_STORE_REVCHAR(val) \
STMT_START { \
if (UTF) { \
SV *zlopp = newSV(UTF8_MAXBYTES); \
unsigned char *flrbbbbb = (unsigned char *) SvPVX(zlopp); \
unsigned const char *const kapow = uvchr_to_utf8(flrbbbbb, val); \
SvCUR_set(zlopp, kapow - flrbbbbb); \
SvPOK_on(zlopp); \
SvUTF8_on(zlopp); \
av_push(revcharmap, zlopp); \
} else { \
char ooooff = (char)val; \
av_push(revcharmap, newSVpvn(&ooooff, 1)); \
} \
} STMT_END
/* This gets the next character from the input, folding it if not already
* folded. */
#define TRIE_READ_CHAR STMT_START { \
wordlen++; \
if ( UTF ) { \
/* if it is UTF then it is either already folded, or does not need \
* folding */ \
uvc = valid_utf8_to_uvchr( (const U8*) uc, &len); \
} \
else if (folder == PL_fold_latin1) { \
/* This folder implies Unicode rules, which in the range expressible \
* by not UTF is the lower case, with the two exceptions, one of \
* which should have been taken care of before calling this */ \
assert(*uc != LATIN_SMALL_LETTER_SHARP_S); \
uvc = toLOWER_L1(*uc); \
if (UNLIKELY(uvc == MICRO_SIGN)) uvc = GREEK_SMALL_LETTER_MU; \
len = 1; \
} else { \
/* raw data, will be folded later if needed */ \
uvc = (U32)*uc; \
len = 1; \
} \
} STMT_END
#define TRIE_LIST_PUSH(state,fid,ns) STMT_START { \
if ( TRIE_LIST_CUR( state ) >=TRIE_LIST_LEN( state ) ) { \
U32 ging = TRIE_LIST_LEN( state ) * 2; \
Renew( trie->states[ state ].trans.list, ging, reg_trie_trans_le ); \
TRIE_LIST_LEN( state ) = ging; \
} \
TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).forid = fid; \
TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).newstate = ns; \
TRIE_LIST_CUR( state )++; \
} STMT_END
#define TRIE_LIST_NEW(state) STMT_START { \
Newx( trie->states[ state ].trans.list, \
4, reg_trie_trans_le ); \
TRIE_LIST_CUR( state ) = 1; \
TRIE_LIST_LEN( state ) = 4; \
} STMT_END
#define TRIE_HANDLE_WORD(state) STMT_START { \
U16 dupe= trie->states[ state ].wordnum; \
regnode * const noper_next = regnext( noper ); \
\
DEBUG_r({ \
/* store the word for dumping */ \
SV* tmp; \
if (OP(noper) != NOTHING) \
tmp = newSVpvn_utf8(STRING(noper), STR_LEN(noper), UTF); \
else \
tmp = newSVpvn_utf8( "", 0, UTF ); \
av_push( trie_words, tmp ); \
}); \
\
curword++; \
trie->wordinfo[curword].prev = 0; \
trie->wordinfo[curword].len = wordlen; \
trie->wordinfo[curword].accept = state; \
\
if ( noper_next < tail ) { \
if (!trie->jump) \
trie->jump = (U16 *) PerlMemShared_calloc( word_count + 1, \
sizeof(U16) ); \
trie->jump[curword] = (U16)(noper_next - convert); \
if (!jumper) \
jumper = noper_next; \
if (!nextbranch) \
nextbranch= regnext(cur); \
} \
\
if ( dupe ) { \
/* It's a dupe. Pre-insert into the wordinfo[].prev */\
/* chain, so that when the bits of chain are later */\
/* linked together, the dups appear in the chain */\
trie->wordinfo[curword].prev = trie->wordinfo[dupe].prev; \
trie->wordinfo[dupe].prev = curword; \
} else { \
/* we haven't inserted this word yet. */ \
trie->states[ state ].wordnum = curword; \
} \
} STMT_END
#define TRIE_TRANS_STATE(state,base,ucharcount,charid,special) \
( ( base + charid >= ucharcount \
&& base + charid < ubound \
&& state == trie->trans[ base - ucharcount + charid ].check \
&& trie->trans[ base - ucharcount + charid ].next ) \
? trie->trans[ base - ucharcount + charid ].next \
: ( state==1 ? special : 0 ) \
)
#define TRIE_BITMAP_SET_FOLDED(trie, uvc, folder) \
STMT_START { \
TRIE_BITMAP_SET(trie, uvc); \
/* store the folded codepoint */ \
if ( folder ) \
TRIE_BITMAP_SET(trie, folder[(U8) uvc ]); \
\
if ( !UTF ) { \
/* store first byte of utf8 representation of */ \
/* variant codepoints */ \
if (! UVCHR_IS_INVARIANT(uvc)) { \
TRIE_BITMAP_SET(trie, UTF8_TWO_BYTE_HI(uvc)); \
} \
} \
} STMT_END
#define MADE_TRIE 1
#define MADE_JUMP_TRIE 2
#define MADE_EXACT_TRIE 4
STATIC I32
S_make_trie(pTHX_ RExC_state_t *pRExC_state, regnode *startbranch,
regnode *first, regnode *last, regnode *tail,
U32 word_count, U32 flags, U32 depth)
{
/* first pass, loop through and scan words */
reg_trie_data *trie;
HV *widecharmap = NULL;
AV *revcharmap = newAV();
regnode *cur;
STRLEN len = 0;
UV uvc = 0;
U16 curword = 0;
U32 next_alloc = 0;
regnode *jumper = NULL;
regnode *nextbranch = NULL;
regnode *convert = NULL;
U32 *prev_states; /* temp array mapping each state to previous one */
/* we just use folder as a flag in utf8 */
const U8 * folder = NULL;
/* in the below add_data call we are storing either 'tu' or 'tuaa'
* which stands for one trie structure, one hash, optionally followed
* by two arrays */
#ifdef DEBUGGING
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tuaa"));
AV *trie_words = NULL;
/* along with revcharmap, this only used during construction but both are
* useful during debugging so we store them in the struct when debugging.
*/
#else
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tu"));
STRLEN trie_charcount=0;
#endif
SV *re_trie_maxbuff;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_MAKE_TRIE;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
switch (flags) {
case EXACT: case EXACT_ONLY8: case EXACTL: break;
case EXACTFAA:
case EXACTFUP:
case EXACTFU:
case EXACTFLU8: folder = PL_fold_latin1; break;
case EXACTF: folder = PL_fold; break;
default: Perl_croak( aTHX_ "panic! In trie construction, unknown node type %u %s", (unsigned) flags, PL_reg_name[flags] );
}
trie = (reg_trie_data *) PerlMemShared_calloc( 1, sizeof(reg_trie_data) );
trie->refcount = 1;
trie->startstate = 1;
trie->wordcount = word_count;
RExC_rxi->data->data[ data_slot ] = (void*)trie;
trie->charmap = (U16 *) PerlMemShared_calloc( 256, sizeof(U16) );
if (flags == EXACT || flags == EXACT_ONLY8 || flags == EXACTL)
trie->bitmap = (char *) PerlMemShared_calloc( ANYOF_BITMAP_SIZE, 1 );
trie->wordinfo = (reg_trie_wordinfo *) PerlMemShared_calloc(
trie->wordcount+1, sizeof(reg_trie_wordinfo));
DEBUG_r({
trie_words = newAV();
});
re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, GV_ADD);
assert(re_trie_maxbuff);
if (!SvIOK(re_trie_maxbuff)) {
sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT);
}
DEBUG_TRIE_COMPILE_r({
Perl_re_indentf( aTHX_
"make_trie start==%d, first==%d, last==%d, tail==%d depth=%d\n",
depth+1,
REG_NODE_NUM(startbranch), REG_NODE_NUM(first),
REG_NODE_NUM(last), REG_NODE_NUM(tail), (int)depth);
});
/* Find the node we are going to overwrite */
if ( first == startbranch && OP( last ) != BRANCH ) {
/* whole branch chain */
convert = first;
} else {
/* branch sub-chain */
convert = NEXTOPER( first );
}
/* -- First loop and Setup --
We first traverse the branches and scan each word to determine if it
contains widechars, and how many unique chars there are, this is
important as we have to build a table with at least as many columns as we
have unique chars.
We use an array of integers to represent the character codes 0..255
(trie->charmap) and we use a an HV* to store Unicode characters. We use
the native representation of the character value as the key and IV's for
the coded index.
*TODO* If we keep track of how many times each character is used we can
remap the columns so that the table compression later on is more
efficient in terms of memory by ensuring the most common value is in the
middle and the least common are on the outside. IMO this would be better
than a most to least common mapping as theres a decent chance the most
common letter will share a node with the least common, meaning the node
will not be compressible. With a middle is most common approach the worst
case is when we have the least common nodes twice.
*/
for ( cur = first ; cur < last ; cur = regnext( cur ) ) {
regnode *noper = NEXTOPER( cur );
const U8 *uc;
const U8 *e;
int foldlen = 0;
U32 wordlen = 0; /* required init */
STRLEN minchars = 0;
STRLEN maxchars = 0;
bool set_bit = trie->bitmap ? 1 : 0; /*store the first char in the
bitmap?*/
if (OP(noper) == NOTHING) {
/* skip past a NOTHING at the start of an alternation
* eg, /(?:)a|(?:b)/ should be the same as /a|b/
*
* If the next node is not something we are supposed to process
* we will just ignore it due to the condition guarding the
* next block.
*/
regnode *noper_next= regnext(noper);
if (noper_next < tail)
noper= noper_next;
}
if ( noper < tail
&& ( OP(noper) == flags
|| (flags == EXACT && OP(noper) == EXACT_ONLY8)
|| (flags == EXACTFU && ( OP(noper) == EXACTFU_ONLY8
|| OP(noper) == EXACTFUP))))
{
uc= (U8*)STRING(noper);
e= uc + STR_LEN(noper);
} else {
trie->minlen= 0;
continue;
}
if ( set_bit ) { /* bitmap only alloced when !(UTF&&Folding) */
TRIE_BITMAP_SET(trie,*uc); /* store the raw first byte
regardless of encoding */
if (OP( noper ) == EXACTFUP) {
/* false positives are ok, so just set this */
TRIE_BITMAP_SET(trie, LATIN_SMALL_LETTER_SHARP_S);
}
}
for ( ; uc < e ; uc += len ) { /* Look at each char in the current
branch */
TRIE_CHARCOUNT(trie)++;
TRIE_READ_CHAR;
/* TRIE_READ_CHAR returns the current character, or its fold if /i
* is in effect. Under /i, this character can match itself, or
* anything that folds to it. If not under /i, it can match just
* itself. Most folds are 1-1, for example k, K, and KELVIN SIGN
* all fold to k, and all are single characters. But some folds
* expand to more than one character, so for example LATIN SMALL
* LIGATURE FFI folds to the three character sequence 'ffi'. If
* the string beginning at 'uc' is 'ffi', it could be matched by
* three characters, or just by the one ligature character. (It
* could also be matched by two characters: LATIN SMALL LIGATURE FF
* followed by 'i', or by 'f' followed by LATIN SMALL LIGATURE FI).
* (Of course 'I' and/or 'F' instead of 'i' and 'f' can also
* match.) The trie needs to know the minimum and maximum number
* of characters that could match so that it can use size alone to
* quickly reject many match attempts. The max is simple: it is
* the number of folded characters in this branch (since a fold is
* never shorter than what folds to it. */
maxchars++;
/* And the min is equal to the max if not under /i (indicated by
* 'folder' being NULL), or there are no multi-character folds. If
* there is a multi-character fold, the min is incremented just
* once, for the character that folds to the sequence. Each
* character in the sequence needs to be added to the list below of
* characters in the trie, but we count only the first towards the
* min number of characters needed. This is done through the
* variable 'foldlen', which is returned by the macros that look
* for these sequences as the number of bytes the sequence
* occupies. Each time through the loop, we decrement 'foldlen' by
* how many bytes the current char occupies. Only when it reaches
* 0 do we increment 'minchars' or look for another multi-character
* sequence. */
if (folder == NULL) {
minchars++;
}
else if (foldlen > 0) {
foldlen -= (UTF) ? UTF8SKIP(uc) : 1;
}
else {
minchars++;
/* See if *uc is the beginning of a multi-character fold. If
* so, we decrement the length remaining to look at, to account
* for the current character this iteration. (We can use 'uc'
* instead of the fold returned by TRIE_READ_CHAR because for
* non-UTF, the latin1_safe macro is smart enough to account
* for all the unfolded characters, and because for UTF, the
* string will already have been folded earlier in the
* compilation process */
if (UTF) {
if ((foldlen = is_MULTI_CHAR_FOLD_utf8_safe(uc, e))) {
foldlen -= UTF8SKIP(uc);
}
}
else if ((foldlen = is_MULTI_CHAR_FOLD_latin1_safe(uc, e))) {
foldlen--;
}
}
/* The current character (and any potential folds) should be added
* to the possible matching characters for this position in this
* branch */
if ( uvc < 256 ) {
if ( folder ) {
U8 folded= folder[ (U8) uvc ];
if ( !trie->charmap[ folded ] ) {
trie->charmap[ folded ]=( ++trie->uniquecharcount );
TRIE_STORE_REVCHAR( folded );
}
}
if ( !trie->charmap[ uvc ] ) {
trie->charmap[ uvc ]=( ++trie->uniquecharcount );
TRIE_STORE_REVCHAR( uvc );
}
if ( set_bit ) {
/* store the codepoint in the bitmap, and its folded
* equivalent. */
TRIE_BITMAP_SET_FOLDED(trie, uvc, folder);
set_bit = 0; /* We've done our bit :-) */
}
} else {
/* XXX We could come up with the list of code points that fold
* to this using PL_utf8_foldclosures, except not for
* multi-char folds, as there may be multiple combinations
* there that could work, which needs to wait until runtime to
* resolve (The comment about LIGATURE FFI above is such an
* example */
SV** svpp;
if ( !widecharmap )
widecharmap = newHV();
svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 1 );
if ( !svpp )
Perl_croak( aTHX_ "error creating/fetching widecharmap entry for 0x%" UVXf, uvc );
if ( !SvTRUE( *svpp ) ) {
sv_setiv( *svpp, ++trie->uniquecharcount );
TRIE_STORE_REVCHAR(uvc);
}
}
} /* end loop through characters in this branch of the trie */
/* We take the min and max for this branch and combine to find the min
* and max for all branches processed so far */
if( cur == first ) {
trie->minlen = minchars;
trie->maxlen = maxchars;
} else if (minchars < trie->minlen) {
trie->minlen = minchars;
} else if (maxchars > trie->maxlen) {
trie->maxlen = maxchars;
}
} /* end first pass */
DEBUG_TRIE_COMPILE_r(
Perl_re_indentf( aTHX_
"TRIE(%s): W:%d C:%d Uq:%d Min:%d Max:%d\n",
depth+1,
( widecharmap ? "UTF8" : "NATIVE" ), (int)word_count,
(int)TRIE_CHARCOUNT(trie), trie->uniquecharcount,
(int)trie->minlen, (int)trie->maxlen )
);
/*
We now know what we are dealing with in terms of unique chars and
string sizes so we can calculate how much memory a naive
representation using a flat table will take. If it's over a reasonable
limit (as specified by ${^RE_TRIE_MAXBUF}) we use a more memory
conservative but potentially much slower representation using an array
of lists.
At the end we convert both representations into the same compressed
form that will be used in regexec.c for matching with. The latter
is a form that cannot be used to construct with but has memory
properties similar to the list form and access properties similar
to the table form making it both suitable for fast searches and
small enough that its feasable to store for the duration of a program.
See the comment in the code where the compressed table is produced
inplace from the flat tabe representation for an explanation of how
the compression works.
*/
Newx(prev_states, TRIE_CHARCOUNT(trie) + 2, U32);
prev_states[1] = 0;
if ( (IV)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1)
> SvIV(re_trie_maxbuff) )
{
/*
Second Pass -- Array Of Lists Representation
Each state will be represented by a list of charid:state records
(reg_trie_trans_le) the first such element holds the CUR and LEN
points of the allocated array. (See defines above).
We build the initial structure using the lists, and then convert
it into the compressed table form which allows faster lookups
(but cant be modified once converted).
*/
STRLEN transcount = 1;
DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using list compiler\n",
depth+1));
trie->states = (reg_trie_state *)
PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2,
sizeof(reg_trie_state) );
TRIE_LIST_NEW(1);
next_alloc = 2;
for ( cur = first ; cur < last ; cur = regnext( cur ) ) {
regnode *noper = NEXTOPER( cur );
U32 state = 1; /* required init */
U16 charid = 0; /* sanity init */
U32 wordlen = 0; /* required init */
if (OP(noper) == NOTHING) {
regnode *noper_next= regnext(noper);
if (noper_next < tail)
noper= noper_next;
/* we will undo this assignment if noper does not
* point at a trieable type in the else clause of
* the following statement. */
}
if ( noper < tail
&& ( OP(noper) == flags
|| (flags == EXACT && OP(noper) == EXACT_ONLY8)
|| (flags == EXACTFU && ( OP(noper) == EXACTFU_ONLY8
|| OP(noper) == EXACTFUP))))
{
const U8 *uc= (U8*)STRING(noper);
const U8 *e= uc + STR_LEN(noper);
for ( ; uc < e ; uc += len ) {
TRIE_READ_CHAR;
if ( uvc < 256 ) {
charid = trie->charmap[ uvc ];
} else {
SV** const svpp = hv_fetch( widecharmap,
(char*)&uvc,
sizeof( UV ),
0);
if ( !svpp ) {
charid = 0;
} else {
charid=(U16)SvIV( *svpp );
}
}
/* charid is now 0 if we dont know the char read, or
* nonzero if we do */
if ( charid ) {
U16 check;
U32 newstate = 0;
charid--;
if ( !trie->states[ state ].trans.list ) {
TRIE_LIST_NEW( state );
}
for ( check = 1;
check <= TRIE_LIST_USED( state );
check++ )
{
if ( TRIE_LIST_ITEM( state, check ).forid
== charid )
{
newstate = TRIE_LIST_ITEM( state, check ).newstate;
break;
}
}
if ( ! newstate ) {
newstate = next_alloc++;
prev_states[newstate] = state;
TRIE_LIST_PUSH( state, charid, newstate );
transcount++;
}
state = newstate;
} else {
Perl_croak( aTHX_ "panic! In trie construction, no char mapping for %" IVdf, uvc );
}
}
} else {
/* If we end up here it is because we skipped past a NOTHING, but did not end up
* on a trieable type. So we need to reset noper back to point at the first regop
* in the branch before we call TRIE_HANDLE_WORD()
*/
noper= NEXTOPER(cur);
}
TRIE_HANDLE_WORD(state);
} /* end second pass */
/* next alloc is the NEXT state to be allocated */
trie->statecount = next_alloc;
trie->states = (reg_trie_state *)
PerlMemShared_realloc( trie->states,
next_alloc
* sizeof(reg_trie_state) );
/* and now dump it out before we compress it */
DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_list(trie, widecharmap,
revcharmap, next_alloc,
depth+1)
);
trie->trans = (reg_trie_trans *)
PerlMemShared_calloc( transcount, sizeof(reg_trie_trans) );
{
U32 state;
U32 tp = 0;
U32 zp = 0;
for( state=1 ; state < next_alloc ; state ++ ) {
U32 base=0;
/*
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_printf( aTHX_ "tp: %d zp: %d ",tp,zp)
);
*/
if (trie->states[state].trans.list) {
U16 minid=TRIE_LIST_ITEM( state, 1).forid;
U16 maxid=minid;
U16 idx;
for( idx = 2 ; idx <= TRIE_LIST_USED( state ) ; idx++ ) {
const U16 forid = TRIE_LIST_ITEM( state, idx).forid;
if ( forid < minid ) {
minid=forid;
} else if ( forid > maxid ) {
maxid=forid;
}
}
if ( transcount < tp + maxid - minid + 1) {
transcount *= 2;
trie->trans = (reg_trie_trans *)
PerlMemShared_realloc( trie->trans,
transcount
* sizeof(reg_trie_trans) );
Zero( trie->trans + (transcount / 2),
transcount / 2,
reg_trie_trans );
}
base = trie->uniquecharcount + tp - minid;
if ( maxid == minid ) {
U32 set = 0;
for ( ; zp < tp ; zp++ ) {
if ( ! trie->trans[ zp ].next ) {
base = trie->uniquecharcount + zp - minid;
trie->trans[ zp ].next = TRIE_LIST_ITEM( state,
1).newstate;
trie->trans[ zp ].check = state;
set = 1;
break;
}
}
if ( !set ) {
trie->trans[ tp ].next = TRIE_LIST_ITEM( state,
1).newstate;
trie->trans[ tp ].check = state;
tp++;
zp = tp;
}
} else {
for ( idx=1; idx <= TRIE_LIST_USED( state ) ; idx++ ) {
const U32 tid = base
- trie->uniquecharcount
+ TRIE_LIST_ITEM( state, idx ).forid;
trie->trans[ tid ].next = TRIE_LIST_ITEM( state,
idx ).newstate;
trie->trans[ tid ].check = state;
}
tp += ( maxid - minid + 1 );
}
Safefree(trie->states[ state ].trans.list);
}
/*
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_printf( aTHX_ " base: %d\n",base);
);
*/
trie->states[ state ].trans.base=base;
}
trie->lasttrans = tp + 1;
}
} else {
/*
Second Pass -- Flat Table Representation.
we dont use the 0 slot of either trans[] or states[] so we add 1 to
each. We know that we will need Charcount+1 trans at most to store
the data (one row per char at worst case) So we preallocate both
structures assuming worst case.
We then construct the trie using only the .next slots of the entry
structs.
We use the .check field of the first entry of the node temporarily
to make compression both faster and easier by keeping track of how
many non zero fields are in the node.
Since trans are numbered from 1 any 0 pointer in the table is a FAIL
transition.
There are two terms at use here: state as a TRIE_NODEIDX() which is
a number representing the first entry of the node, and state as a
TRIE_NODENUM() which is the trans number. state 1 is TRIE_NODEIDX(1)
and TRIE_NODENUM(1), state 2 is TRIE_NODEIDX(2) and TRIE_NODENUM(3)
if there are 2 entrys per node. eg:
A B A B
1. 2 4 1. 3 7
2. 0 3 3. 0 5
3. 0 0 5. 0 0
4. 0 0 7. 0 0
The table is internally in the right hand, idx form. However as we
also have to deal with the states array which is indexed by nodenum
we have to use TRIE_NODENUM() to convert.
*/
DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using table compiler\n",
depth+1));
trie->trans = (reg_trie_trans *)
PerlMemShared_calloc( ( TRIE_CHARCOUNT(trie) + 1 )
* trie->uniquecharcount + 1,
sizeof(reg_trie_trans) );
trie->states = (reg_trie_state *)
PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2,
sizeof(reg_trie_state) );
next_alloc = trie->uniquecharcount + 1;
for ( cur = first ; cur < last ; cur = regnext( cur ) ) {
regnode *noper = NEXTOPER( cur );
U32 state = 1; /* required init */
U16 charid = 0; /* sanity init */
U32 accept_state = 0; /* sanity init */
U32 wordlen = 0; /* required init */
if (OP(noper) == NOTHING) {
regnode *noper_next= regnext(noper);
if (noper_next < tail)
noper= noper_next;
/* we will undo this assignment if noper does not
* point at a trieable type in the else clause of
* the following statement. */
}
if ( noper < tail
&& ( OP(noper) == flags
|| (flags == EXACT && OP(noper) == EXACT_ONLY8)
|| (flags == EXACTFU && ( OP(noper) == EXACTFU_ONLY8
|| OP(noper) == EXACTFUP))))
{
const U8 *uc= (U8*)STRING(noper);
const U8 *e= uc + STR_LEN(noper);
for ( ; uc < e ; uc += len ) {
TRIE_READ_CHAR;
if ( uvc < 256 ) {
charid = trie->charmap[ uvc ];
} else {
SV* const * const svpp = hv_fetch( widecharmap,
(char*)&uvc,
sizeof( UV ),
0);
charid = svpp ? (U16)SvIV(*svpp) : 0;
}
if ( charid ) {
charid--;
if ( !trie->trans[ state + charid ].next ) {
trie->trans[ state + charid ].next = next_alloc;
trie->trans[ state ].check++;
prev_states[TRIE_NODENUM(next_alloc)]
= TRIE_NODENUM(state);
next_alloc += trie->uniquecharcount;
}
state = trie->trans[ state + charid ].next;
} else {
Perl_croak( aTHX_ "panic! In trie construction, no char mapping for %" IVdf, uvc );
}
/* charid is now 0 if we dont know the char read, or
* nonzero if we do */
}
} else {
/* If we end up here it is because we skipped past a NOTHING, but did not end up
* on a trieable type. So we need to reset noper back to point at the first regop
* in the branch before we call TRIE_HANDLE_WORD().
*/
noper= NEXTOPER(cur);
}
accept_state = TRIE_NODENUM( state );
TRIE_HANDLE_WORD(accept_state);
} /* end second pass */
/* and now dump it out before we compress it */
DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_table(trie, widecharmap,
revcharmap,
next_alloc, depth+1));
{
/*
* Inplace compress the table.*
For sparse data sets the table constructed by the trie algorithm will
be mostly 0/FAIL transitions or to put it another way mostly empty.
(Note that leaf nodes will not contain any transitions.)
This algorithm compresses the tables by eliminating most such
transitions, at the cost of a modest bit of extra work during lookup:
- Each states[] entry contains a .base field which indicates the
index in the state[] array wheres its transition data is stored.
- If .base is 0 there are no valid transitions from that node.
- If .base is nonzero then charid is added to it to find an entry in
the trans array.
-If trans[states[state].base+charid].check!=state then the
transition is taken to be a 0/Fail transition. Thus if there are fail
transitions at the front of the node then the .base offset will point
somewhere inside the previous nodes data (or maybe even into a node
even earlier), but the .check field determines if the transition is
valid.
XXX - wrong maybe?
The following process inplace converts the table to the compressed
table: We first do not compress the root node 1,and mark all its
.check pointers as 1 and set its .base pointer as 1 as well. This
allows us to do a DFA construction from the compressed table later,
and ensures that any .base pointers we calculate later are greater
than 0.
- We set 'pos' to indicate the first entry of the second node.
- We then iterate over the columns of the node, finding the first and
last used entry at l and m. We then copy l..m into pos..(pos+m-l),
and set the .check pointers accordingly, and advance pos
appropriately and repreat for the next node. Note that when we copy
the next pointers we have to convert them from the original
NODEIDX form to NODENUM form as the former is not valid post
compression.
- If a node has no transitions used we mark its base as 0 and do not
advance the pos pointer.
- If a node only has one transition we use a second pointer into the
structure to fill in allocated fail transitions from other states.
This pointer is independent of the main pointer and scans forward
looking for null transitions that are allocated to a state. When it
finds one it writes the single transition into the "hole". If the
pointer doesnt find one the single transition is appended as normal.
- Once compressed we can Renew/realloc the structures to release the
excess space.
See "Table-Compression Methods" in sec 3.9 of the Red Dragon,
specifically Fig 3.47 and the associated pseudocode.
demq
*/
const U32 laststate = TRIE_NODENUM( next_alloc );
U32 state, charid;
U32 pos = 0, zp=0;
trie->statecount = laststate;
for ( state = 1 ; state < laststate ; state++ ) {
U8 flag = 0;
const U32 stateidx = TRIE_NODEIDX( state );
const U32 o_used = trie->trans[ stateidx ].check;
U32 used = trie->trans[ stateidx ].check;
trie->trans[ stateidx ].check = 0;
for ( charid = 0;
used && charid < trie->uniquecharcount;
charid++ )
{
if ( flag || trie->trans[ stateidx + charid ].next ) {
if ( trie->trans[ stateidx + charid ].next ) {
if (o_used == 1) {
for ( ; zp < pos ; zp++ ) {
if ( ! trie->trans[ zp ].next ) {
break;
}
}
trie->states[ state ].trans.base
= zp
+ trie->uniquecharcount
- charid ;
trie->trans[ zp ].next
= SAFE_TRIE_NODENUM( trie->trans[ stateidx
+ charid ].next );
trie->trans[ zp ].check = state;
if ( ++zp > pos ) pos = zp;
break;
}
used--;
}
if ( !flag ) {
flag = 1;
trie->states[ state ].trans.base
= pos + trie->uniquecharcount - charid ;
}
trie->trans[ pos ].next
= SAFE_TRIE_NODENUM(
trie->trans[ stateidx + charid ].next );
trie->trans[ pos ].check = state;
pos++;
}
}
}
trie->lasttrans = pos + 1;
trie->states = (reg_trie_state *)
PerlMemShared_realloc( trie->states, laststate
* sizeof(reg_trie_state) );
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_indentf( aTHX_ "Alloc: %d Orig: %" IVdf " elements, Final:%" IVdf ". Savings of %%%5.2f\n",
depth+1,
(int)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount
+ 1 ),
(IV)next_alloc,
(IV)pos,
( ( next_alloc - pos ) * 100 ) / (double)next_alloc );
);
} /* end table compress */
}
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_indentf( aTHX_ "Statecount:%" UVxf " Lasttrans:%" UVxf "\n",
depth+1,
(UV)trie->statecount,
(UV)trie->lasttrans)
);
/* resize the trans array to remove unused space */
trie->trans = (reg_trie_trans *)
PerlMemShared_realloc( trie->trans, trie->lasttrans
* sizeof(reg_trie_trans) );
{ /* Modify the program and insert the new TRIE node */
U8 nodetype =(U8)(flags & 0xFF);
char *str=NULL;
#ifdef DEBUGGING
regnode *optimize = NULL;
#ifdef RE_TRACK_PATTERN_OFFSETS
U32 mjd_offset = 0;
U32 mjd_nodelen = 0;
#endif /* RE_TRACK_PATTERN_OFFSETS */
#endif /* DEBUGGING */
/*
This means we convert either the first branch or the first Exact,
depending on whether the thing following (in 'last') is a branch
or not and whther first is the startbranch (ie is it a sub part of
the alternation or is it the whole thing.)
Assuming its a sub part we convert the EXACT otherwise we convert
the whole branch sequence, including the first.
*/
/* Find the node we are going to overwrite */
if ( first != startbranch || OP( last ) == BRANCH ) {
/* branch sub-chain */
NEXT_OFF( first ) = (U16)(last - first);
#ifdef RE_TRACK_PATTERN_OFFSETS
DEBUG_r({
mjd_offset= Node_Offset((convert));
mjd_nodelen= Node_Length((convert));
});
#endif
/* whole branch chain */
}
#ifdef RE_TRACK_PATTERN_OFFSETS
else {
DEBUG_r({
const regnode *nop = NEXTOPER( convert );
mjd_offset= Node_Offset((nop));
mjd_nodelen= Node_Length((nop));
});
}
DEBUG_OPTIMISE_r(
Perl_re_indentf( aTHX_ "MJD offset:%" UVuf " MJD length:%" UVuf "\n",
depth+1,
(UV)mjd_offset, (UV)mjd_nodelen)
);
#endif
/* But first we check to see if there is a common prefix we can
split out as an EXACT and put in front of the TRIE node. */
trie->startstate= 1;
if ( trie->bitmap && !widecharmap && !trie->jump ) {
/* we want to find the first state that has more than
* one transition, if that state is not the first state
* then we have a common prefix which we can remove.
*/
U32 state;
for ( state = 1 ; state < trie->statecount-1 ; state++ ) {
U32 ofs = 0;
I32 first_ofs = -1; /* keeps track of the ofs of the first
transition, -1 means none */
U32 count = 0;
const U32 base = trie->states[ state ].trans.base;
/* does this state terminate an alternation? */
if ( trie->states[state].wordnum )
count = 1;
for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) {
if ( ( base + ofs >= trie->uniquecharcount ) &&
( base + ofs - trie->uniquecharcount < trie->lasttrans ) &&
trie->trans[ base + ofs - trie->uniquecharcount ].check == state )
{
if ( ++count > 1 ) {
/* we have more than one transition */
SV **tmp;
U8 *ch;
/* if this is the first state there is no common prefix
* to extract, so we can exit */
if ( state == 1 ) break;
tmp = av_fetch( revcharmap, ofs, 0);
ch = (U8*)SvPV_nolen_const( *tmp );
/* if we are on count 2 then we need to initialize the
* bitmap, and store the previous char if there was one
* in it*/
if ( count == 2 ) {
/* clear the bitmap */
Zero(trie->bitmap, ANYOF_BITMAP_SIZE, char);
DEBUG_OPTIMISE_r(
Perl_re_indentf( aTHX_ "New Start State=%" UVuf " Class: [",
depth+1,
(UV)state));
if (first_ofs >= 0) {
SV ** const tmp = av_fetch( revcharmap, first_ofs, 0);
const U8 * const ch = (U8*)SvPV_nolen_const( *tmp );
TRIE_BITMAP_SET_FOLDED(trie,*ch, folder);
DEBUG_OPTIMISE_r(
Perl_re_printf( aTHX_ "%s", (char*)ch)
);
}
}
/* store the current firstchar in the bitmap */
TRIE_BITMAP_SET_FOLDED(trie,*ch, folder);
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "%s", ch));
}
first_ofs = ofs;
}
}
if ( count == 1 ) {
/* This state has only one transition, its transition is part
* of a common prefix - we need to concatenate the char it
* represents to what we have so far. */
SV **tmp = av_fetch( revcharmap, first_ofs, 0);
STRLEN len;
char *ch = SvPV( *tmp, len );
DEBUG_OPTIMISE_r({
SV *sv=sv_newmortal();
Perl_re_indentf( aTHX_ "Prefix State: %" UVuf " Ofs:%" UVuf " Char='%s'\n",
depth+1,
(UV)state, (UV)first_ofs,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), 6,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
});
if ( state==1 ) {
OP( convert ) = nodetype;
str=STRING(convert);
STR_LEN(convert)=0;
}
STR_LEN(convert) += len;
while (len--)
*str++ = *ch++;
} else {
#ifdef DEBUGGING
if (state>1)
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "]\n"));
#endif
break;
}
}
trie->prefixlen = (state-1);
if (str) {
regnode *n = convert+NODE_SZ_STR(convert);
NEXT_OFF(convert) = NODE_SZ_STR(convert);
trie->startstate = state;
trie->minlen -= (state - 1);
trie->maxlen -= (state - 1);
#ifdef DEBUGGING
/* At least the UNICOS C compiler choked on this
* being argument to DEBUG_r(), so let's just have
* it right here. */
if (
#ifdef PERL_EXT_RE_BUILD
1
#else
DEBUG_r_TEST
#endif
) {
regnode *fix = convert;
U32 word = trie->wordcount;
#ifdef RE_TRACK_PATTERN_OFFSETS
mjd_nodelen++;
#endif
Set_Node_Offset_Length(convert, mjd_offset, state - 1);
while( ++fix < n ) {
Set_Node_Offset_Length(fix, 0, 0);
}
while (word--) {
SV ** const tmp = av_fetch( trie_words, word, 0 );
if (tmp) {
if ( STR_LEN(convert) <= SvCUR(*tmp) )
sv_chop(*tmp, SvPV_nolen(*tmp) + STR_LEN(convert));
else
sv_chop(*tmp, SvPV_nolen(*tmp) + SvCUR(*tmp));
}
}
}
#endif
if (trie->maxlen) {
convert = n;
} else {
NEXT_OFF(convert) = (U16)(tail - convert);
DEBUG_r(optimize= n);
}
}
}
if (!jumper)
jumper = last;
if ( trie->maxlen ) {
NEXT_OFF( convert ) = (U16)(tail - convert);
ARG_SET( convert, data_slot );
/* Store the offset to the first unabsorbed branch in
jump[0], which is otherwise unused by the jump logic.
We use this when dumping a trie and during optimisation. */
if (trie->jump)
trie->jump[0] = (U16)(nextbranch - convert);
/* If the start state is not accepting (meaning there is no empty string/NOTHING)
* and there is a bitmap
* and the first "jump target" node we found leaves enough room
* then convert the TRIE node into a TRIEC node, with the bitmap
* embedded inline in the opcode - this is hypothetically faster.
*/
if ( !trie->states[trie->startstate].wordnum
&& trie->bitmap
&& ( (char *)jumper - (char *)convert) >= (int)sizeof(struct regnode_charclass) )
{
OP( convert ) = TRIEC;
Copy(trie->bitmap, ((struct regnode_charclass *)convert)->bitmap, ANYOF_BITMAP_SIZE, char);
PerlMemShared_free(trie->bitmap);
trie->bitmap= NULL;
} else
OP( convert ) = TRIE;
/* store the type in the flags */
convert->flags = nodetype;
DEBUG_r({
optimize = convert
+ NODE_STEP_REGNODE
+ regarglen[ OP( convert ) ];
});
/* XXX We really should free up the resource in trie now,
as we won't use them - (which resources?) dmq */
}
/* needed for dumping*/
DEBUG_r(if (optimize) {
regnode *opt = convert;
while ( ++opt < optimize) {
Set_Node_Offset_Length(opt, 0, 0);
}
/*
Try to clean up some of the debris left after the
optimisation.
*/
while( optimize < jumper ) {
Track_Code( mjd_nodelen += Node_Length((optimize)); );
OP( optimize ) = OPTIMIZED;
Set_Node_Offset_Length(optimize, 0, 0);
optimize++;
}
Set_Node_Offset_Length(convert, mjd_offset, mjd_nodelen);
});
} /* end node insert */
/* Finish populating the prev field of the wordinfo array. Walk back
* from each accept state until we find another accept state, and if
* so, point the first word's .prev field at the second word. If the
* second already has a .prev field set, stop now. This will be the
* case either if we've already processed that word's accept state,
* or that state had multiple words, and the overspill words were
* already linked up earlier.
*/
{
U16 word;
U32 state;
U16 prev;
for (word=1; word <= trie->wordcount; word++) {
prev = 0;
if (trie->wordinfo[word].prev)
continue;
state = trie->wordinfo[word].accept;
while (state) {
state = prev_states[state];
if (!state)
break;
prev = trie->states[state].wordnum;
if (prev)
break;
}
trie->wordinfo[word].prev = prev;
}
Safefree(prev_states);
}
/* and now dump out the compressed format */
DEBUG_TRIE_COMPILE_r(dump_trie(trie, widecharmap, revcharmap, depth+1));
RExC_rxi->data->data[ data_slot + 1 ] = (void*)widecharmap;
#ifdef DEBUGGING
RExC_rxi->data->data[ data_slot + TRIE_WORDS_OFFSET ] = (void*)trie_words;
RExC_rxi->data->data[ data_slot + 3 ] = (void*)revcharmap;
#else
SvREFCNT_dec_NN(revcharmap);
#endif
return trie->jump
? MADE_JUMP_TRIE
: trie->startstate>1
? MADE_EXACT_TRIE
: MADE_TRIE;
}
STATIC regnode *
S_construct_ahocorasick_from_trie(pTHX_ RExC_state_t *pRExC_state, regnode *source, U32 depth)
{
/* The Trie is constructed and compressed now so we can build a fail array if
* it's needed
This is basically the Aho-Corasick algorithm. Its from exercise 3.31 and
3.32 in the
"Red Dragon" -- Compilers, principles, techniques, and tools. Aho, Sethi,
Ullman 1985/88
ISBN 0-201-10088-6
We find the fail state for each state in the trie, this state is the longest
proper suffix of the current state's 'word' that is also a proper prefix of
another word in our trie. State 1 represents the word '' and is thus the
default fail state. This allows the DFA not to have to restart after its
tried and failed a word at a given point, it simply continues as though it
had been matching the other word in the first place.
Consider
'abcdgu'=~/abcdefg|cdgu/
When we get to 'd' we are still matching the first word, we would encounter
'g' which would fail, which would bring us to the state representing 'd' in
the second word where we would try 'g' and succeed, proceeding to match
'cdgu'.
*/
/* add a fail transition */
const U32 trie_offset = ARG(source);
reg_trie_data *trie=(reg_trie_data *)RExC_rxi->data->data[trie_offset];
U32 *q;
const U32 ucharcount = trie->uniquecharcount;
const U32 numstates = trie->statecount;
const U32 ubound = trie->lasttrans + ucharcount;
U32 q_read = 0;
U32 q_write = 0;
U32 charid;
U32 base = trie->states[ 1 ].trans.base;
U32 *fail;
reg_ac_data *aho;
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("T"));
regnode *stclass;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_CONSTRUCT_AHOCORASICK_FROM_TRIE;
PERL_UNUSED_CONTEXT;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
if ( OP(source) == TRIE ) {
struct regnode_1 *op = (struct regnode_1 *)
PerlMemShared_calloc(1, sizeof(struct regnode_1));
StructCopy(source, op, struct regnode_1);
stclass = (regnode *)op;
} else {
struct regnode_charclass *op = (struct regnode_charclass *)
PerlMemShared_calloc(1, sizeof(struct regnode_charclass));
StructCopy(source, op, struct regnode_charclass);
stclass = (regnode *)op;
}
OP(stclass)+=2; /* convert the TRIE type to its AHO-CORASICK equivalent */
ARG_SET( stclass, data_slot );
aho = (reg_ac_data *) PerlMemShared_calloc( 1, sizeof(reg_ac_data) );
RExC_rxi->data->data[ data_slot ] = (void*)aho;
aho->trie=trie_offset;
aho->states=(reg_trie_state *)PerlMemShared_malloc( numstates * sizeof(reg_trie_state) );
Copy( trie->states, aho->states, numstates, reg_trie_state );
Newx( q, numstates, U32);
aho->fail = (U32 *) PerlMemShared_calloc( numstates, sizeof(U32) );
aho->refcount = 1;
fail = aho->fail;
/* initialize fail[0..1] to be 1 so that we always have
a valid final fail state */
fail[ 0 ] = fail[ 1 ] = 1;
for ( charid = 0; charid < ucharcount ; charid++ ) {
const U32 newstate = TRIE_TRANS_STATE( 1, base, ucharcount, charid, 0 );
if ( newstate ) {
q[ q_write ] = newstate;
/* set to point at the root */
fail[ q[ q_write++ ] ]=1;
}
}
while ( q_read < q_write) {
const U32 cur = q[ q_read++ % numstates ];
base = trie->states[ cur ].trans.base;
for ( charid = 0 ; charid < ucharcount ; charid++ ) {
const U32 ch_state = TRIE_TRANS_STATE( cur, base, ucharcount, charid, 1 );
if (ch_state) {
U32 fail_state = cur;
U32 fail_base;
do {
fail_state = fail[ fail_state ];
fail_base = aho->states[ fail_state ].trans.base;
} while ( !TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ) );
fail_state = TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 );
fail[ ch_state ] = fail_state;
if ( !aho->states[ ch_state ].wordnum && aho->states[ fail_state ].wordnum )
{
aho->states[ ch_state ].wordnum = aho->states[ fail_state ].wordnum;
}
q[ q_write++ % numstates] = ch_state;
}
}
}
/* restore fail[0..1] to 0 so that we "fall out" of the AC loop
when we fail in state 1, this allows us to use the
charclass scan to find a valid start char. This is based on the principle
that theres a good chance the string being searched contains lots of stuff
that cant be a start char.
*/
fail[ 0 ] = fail[ 1 ] = 0;
DEBUG_TRIE_COMPILE_r({
Perl_re_indentf( aTHX_ "Stclass Failtable (%" UVuf " states): 0",
depth, (UV)numstates
);
for( q_read=1; q_read<numstates; q_read++ ) {
Perl_re_printf( aTHX_ ", %" UVuf, (UV)fail[q_read]);
}
Perl_re_printf( aTHX_ "\n");
});
Safefree(q);
/*RExC_seen |= REG_TRIEDFA_SEEN;*/
return stclass;
}
/* The below joins as many adjacent EXACTish nodes as possible into a single
* one. The regop may be changed if the node(s) contain certain sequences that
* require special handling. The joining is only done if:
* 1) there is room in the current conglomerated node to entirely contain the
* next one.
* 2) they are compatible node types
*
* The adjacent nodes actually may be separated by NOTHING-kind nodes, and
* these get optimized out
*
* XXX khw thinks this should be enhanced to fill EXACT (at least) nodes as full
* as possible, even if that means splitting an existing node so that its first
* part is moved to the preceeding node. This would maximise the efficiency of
* memEQ during matching.
*
* If a node is to match under /i (folded), the number of characters it matches
* can be different than its character length if it contains a multi-character
* fold. *min_subtract is set to the total delta number of characters of the
* input nodes.
*
* And *unfolded_multi_char is set to indicate whether or not the node contains
* an unfolded multi-char fold. This happens when it won't be known until
* runtime whether the fold is valid or not; namely
* 1) for EXACTF nodes that contain LATIN SMALL LETTER SHARP S, as only if the
* target string being matched against turns out to be UTF-8 is that fold
* valid; or
* 2) for EXACTFL nodes whose folding rules depend on the locale in force at
* runtime.
* (Multi-char folds whose components are all above the Latin1 range are not
* run-time locale dependent, and have already been folded by the time this
* function is called.)
*
* This is as good a place as any to discuss the design of handling these
* multi-character fold sequences. It's been wrong in Perl for a very long
* time. There are three code points in Unicode whose multi-character folds
* were long ago discovered to mess things up. The previous designs for
* dealing with these involved assigning a special node for them. This
* approach doesn't always work, as evidenced by this example:
* "\xDFs" =~ /s\xDF/ui # Used to fail before these patches
* Both sides fold to "sss", but if the pattern is parsed to create a node that
* would match just the \xDF, it won't be able to handle the case where a
* successful match would have to cross the node's boundary. The new approach
* that hopefully generally solves the problem generates an EXACTFUP node
* that is "sss" in this case.
*
* It turns out that there are problems with all multi-character folds, and not
* just these three. Now the code is general, for all such cases. The
* approach taken is:
* 1) This routine examines each EXACTFish node that could contain multi-
* character folded sequences. Since a single character can fold into
* such a sequence, the minimum match length for this node is less than
* the number of characters in the node. This routine returns in
* *min_subtract how many characters to subtract from the the actual
* length of the string to get a real minimum match length; it is 0 if
* there are no multi-char foldeds. This delta is used by the caller to
* adjust the min length of the match, and the delta between min and max,
* so that the optimizer doesn't reject these possibilities based on size
* constraints.
*
* 2) For the sequence involving the LATIN SMALL LETTER SHARP S (U+00DF)
* under /u, we fold it to 'ss' in regatom(), and in this routine, after
* joining, we scan for occurrences of the sequence 'ss' in non-UTF-8
* EXACTFU nodes. The node type of such nodes is then changed to
* EXACTFUP, indicating it is problematic, and needs careful handling.
* (The procedures in step 1) above are sufficient to handle this case in
* UTF-8 encoded nodes.) The reason this is problematic is that this is
* the only case where there is a possible fold length change in non-UTF-8
* patterns. By reserving a special node type for problematic cases, the
* far more common regular EXACTFU nodes can be processed faster.
* regexec.c takes advantage of this.
*
* EXACTFUP has been created as a grab-bag for (hopefully uncommon)
* problematic cases. These all only occur when the pattern is not
* UTF-8. In addition to the 'ss' sequence where there is a possible fold
* length change, it handles the situation where the string cannot be
* entirely folded. The strings in an EXACTFish node are folded as much
* as possible during compilation in regcomp.c. This saves effort in
* regex matching. By using an EXACTFUP node when it is not possible to
* fully fold at compile time, regexec.c can know that everything in an
* EXACTFU node is folded, so folding can be skipped at runtime. The only
* case where folding in EXACTFU nodes can't be done at compile time is
* the presumably uncommon MICRO SIGN, when the pattern isn't UTF-8. This
* is because its fold requires UTF-8 to represent. Thus EXACTFUP nodes
* handle two very different cases. Alternatively, there could have been
* a node type where there are length changes, one for unfolded, and one
* for both. If yet another special case needed to be created, the number
* of required node types would have to go to 7. khw figures that even
* though there are plenty of node types to spare, that the maintenance
* cost wasn't worth the small speedup of doing it that way, especially
* since he thinks the MICRO SIGN is rarely encountered in practice.
*
* There are other cases where folding isn't done at compile time, but
* none of them are under /u, and hence not for EXACTFU nodes. The folds
* in EXACTFL nodes aren't known until runtime, and vary as the locale
* changes. Some folds in EXACTF depend on if the runtime target string
* is UTF-8 or not. (regatom() will create an EXACTFU node even under /di
* when no fold in it depends on the UTF-8ness of the target string.)
*
* 3) A problem remains for unfolded multi-char folds. (These occur when the
* validity of the fold won't be known until runtime, and so must remain
* unfolded for now. This happens for the sharp s in EXACTF and EXACTFAA
* nodes when the pattern isn't in UTF-8. (Note, BTW, that there cannot
* be an EXACTF node with a UTF-8 pattern.) They also occur for various
* folds in EXACTFL nodes, regardless of the UTF-ness of the pattern.)
* The reason this is a problem is that the optimizer part of regexec.c
* (probably unwittingly, in Perl_regexec_flags()) makes an assumption
* that a character in the pattern corresponds to at most a single
* character in the target string. (And I do mean character, and not byte
* here, unlike other parts of the documentation that have never been
* updated to account for multibyte Unicode.) Sharp s in EXACTF and
* EXACTFL nodes can match the two character string 'ss'; in EXACTFAA
* nodes it can match "\x{17F}\x{17F}". These, along with other ones in
* EXACTFL nodes, violate the assumption, and they are the only instances
* where it is violated. I'm reluctant to try to change the assumption,
* as the code involved is impenetrable to me (khw), so instead the code
* here punts. This routine examines EXACTFL nodes, and (when the pattern
* isn't UTF-8) EXACTF and EXACTFAA for such unfolded folds, and returns a
* boolean indicating whether or not the node contains such a fold. When
* it is true, the caller sets a flag that later causes the optimizer in
* this file to not set values for the floating and fixed string lengths,
* and thus avoids the optimizer code in regexec.c that makes the invalid
* assumption. Thus, there is no optimization based on string lengths for
* EXACTFL nodes that contain these few folds, nor for non-UTF8-pattern
* EXACTF and EXACTFAA nodes that contain the sharp s. (The reason the
* assumption is wrong only in these cases is that all other non-UTF-8
* folds are 1-1; and, for UTF-8 patterns, we pre-fold all other folds to
* their expanded versions. (Again, we can't prefold sharp s to 'ss' in
* EXACTF nodes because we don't know at compile time if it actually
* matches 'ss' or not. For EXACTF nodes it will match iff the target
* string is in UTF-8. This is in contrast to EXACTFU nodes, where it
* always matches; and EXACTFAA where it never does. In an EXACTFAA node
* in a UTF-8 pattern, sharp s is folded to "\x{17F}\x{17F}, avoiding the
* problem; but in a non-UTF8 pattern, folding it to that above-Latin1
* string would require the pattern to be forced into UTF-8, the overhead
* of which we want to avoid. Similarly the unfolded multi-char folds in
* EXACTFL nodes will match iff the locale at the time of match is a UTF-8
* locale.)
*
* Similarly, the code that generates tries doesn't currently handle
* not-already-folded multi-char folds, and it looks like a pain to change
* that. Therefore, trie generation of EXACTFAA nodes with the sharp s
* doesn't work. Instead, such an EXACTFAA is turned into a new regnode,
* EXACTFAA_NO_TRIE, which the trie code knows not to handle. Most people
* using /iaa matching will be doing so almost entirely with ASCII
* strings, so this should rarely be encountered in practice */
#define JOIN_EXACT(scan,min_subtract,unfolded_multi_char, flags) \
if (PL_regkind[OP(scan)] == EXACT) \
join_exact(pRExC_state,(scan),(min_subtract),unfolded_multi_char, (flags), NULL, depth+1)
STATIC U32
S_join_exact(pTHX_ RExC_state_t *pRExC_state, regnode *scan,
UV *min_subtract, bool *unfolded_multi_char,
U32 flags, regnode *val, U32 depth)
{
/* Merge several consecutive EXACTish nodes into one. */
regnode *n = regnext(scan);
U32 stringok = 1;
regnode *next = scan + NODE_SZ_STR(scan);
U32 merged = 0;
U32 stopnow = 0;
#ifdef DEBUGGING
regnode *stop = scan;
GET_RE_DEBUG_FLAGS_DECL;
#else
PERL_UNUSED_ARG(depth);
#endif
PERL_ARGS_ASSERT_JOIN_EXACT;
#ifndef EXPERIMENTAL_INPLACESCAN
PERL_UNUSED_ARG(flags);
PERL_UNUSED_ARG(val);
#endif
DEBUG_PEEP("join", scan, depth, 0);
assert(PL_regkind[OP(scan)] == EXACT);
/* Look through the subsequent nodes in the chain. Skip NOTHING, merge
* EXACT ones that are mergeable to the current one. */
while ( n
&& ( PL_regkind[OP(n)] == NOTHING
|| (stringok && PL_regkind[OP(n)] == EXACT))
&& NEXT_OFF(n)
&& NEXT_OFF(scan) + NEXT_OFF(n) < I16_MAX)
{
if (OP(n) == TAIL || n > next)
stringok = 0;
if (PL_regkind[OP(n)] == NOTHING) {
DEBUG_PEEP("skip:", n, depth, 0);
NEXT_OFF(scan) += NEXT_OFF(n);
next = n + NODE_STEP_REGNODE;
#ifdef DEBUGGING
if (stringok)
stop = n;
#endif
n = regnext(n);
}
else if (stringok) {
const unsigned int oldl = STR_LEN(scan);
regnode * const nnext = regnext(n);
/* XXX I (khw) kind of doubt that this works on platforms (should
* Perl ever run on one) where U8_MAX is above 255 because of lots
* of other assumptions */
/* Don't join if the sum can't fit into a single node */
if (oldl + STR_LEN(n) > U8_MAX)
break;
/* Joining something that requires UTF-8 with something that
* doesn't, means the result requires UTF-8. */
if (OP(scan) == EXACT && (OP(n) == EXACT_ONLY8)) {
OP(scan) = EXACT_ONLY8;
}
else if (OP(scan) == EXACT_ONLY8 && (OP(n) == EXACT)) {
; /* join is compatible, no need to change OP */
}
else if ((OP(scan) == EXACTFU) && (OP(n) == EXACTFU_ONLY8)) {
OP(scan) = EXACTFU_ONLY8;
}
else if ((OP(scan) == EXACTFU_ONLY8) && (OP(n) == EXACTFU)) {
; /* join is compatible, no need to change OP */
}
else if (OP(scan) == EXACTFU && OP(n) == EXACTFU) {
; /* join is compatible, no need to change OP */
}
else if (OP(scan) == EXACTFU && OP(n) == EXACTFU_S_EDGE) {
/* Under /di, temporary EXACTFU_S_EDGE nodes are generated,
* which can join with EXACTFU ones. We check for this case
* here. These need to be resolved to either EXACTFU or
* EXACTF at joining time. They have nothing in them that
* would forbid them from being the more desirable EXACTFU
* nodes except that they begin and/or end with a single [Ss].
* The reason this is problematic is because they could be
* joined in this loop with an adjacent node that ends and/or
* begins with [Ss] which would then form the sequence 'ss',
* which matches differently under /di than /ui, in which case
* EXACTFU can't be used. If the 'ss' sequence doesn't get
* formed, the nodes get absorbed into any adjacent EXACTFU
* node. And if the only adjacent node is EXACTF, they get
* absorbed into that, under the theory that a longer node is
* better than two shorter ones, even if one is EXACTFU. Note
* that EXACTFU_ONLY8 is generated only for UTF-8 patterns,
* and the EXACTFU_S_EDGE ones only for non-UTF-8. */
if (STRING(n)[STR_LEN(n)-1] == 's') {
/* Here the joined node would end with 's'. If the node
* following the combination is an EXACTF one, it's better to
* join this trailing edge 's' node with that one, leaving the
* current one in 'scan' be the more desirable EXACTFU */
if (OP(nnext) == EXACTF) {
break;
}
OP(scan) = EXACTFU_S_EDGE;
} /* Otherwise, the beginning 's' of the 2nd node just
becomes an interior 's' in 'scan' */
}
else if (OP(scan) == EXACTF && OP(n) == EXACTF) {
; /* join is compatible, no need to change OP */
}
else if (OP(scan) == EXACTF && OP(n) == EXACTFU_S_EDGE) {
/* EXACTF nodes are compatible for joining with EXACTFU_S_EDGE
* nodes. But the latter nodes can be also joined with EXACTFU
* ones, and that is a better outcome, so if the node following
* 'n' is EXACTFU, quit now so that those two can be joined
* later */
if (OP(nnext) == EXACTFU) {
break;
}
/* The join is compatible, and the combined node will be
* EXACTF. (These don't care if they begin or end with 's' */
}
else if (OP(scan) == EXACTFU_S_EDGE && OP(n) == EXACTFU_S_EDGE) {
if ( STRING(scan)[STR_LEN(scan)-1] == 's'
&& STRING(n)[0] == 's')
{
/* When combined, we have the sequence 'ss', which means we
* have to remain /di */
OP(scan) = EXACTF;
}
}
else if (OP(scan) == EXACTFU_S_EDGE && OP(n) == EXACTFU) {
if (STRING(n)[0] == 's') {
; /* Here the join is compatible and the combined node
starts with 's', no need to change OP */
}
else { /* Now the trailing 's' is in the interior */
OP(scan) = EXACTFU;
}
}
else if (OP(scan) == EXACTFU_S_EDGE && OP(n) == EXACTF) {
/* The join is compatible, and the combined node will be
* EXACTF. (These don't care if they begin or end with 's' */
OP(scan) = EXACTF;
}
else if (OP(scan) != OP(n)) {
/* The only other compatible joinings are the same node type */
break;
}
DEBUG_PEEP("merg", n, depth, 0);
merged++;
NEXT_OFF(scan) += NEXT_OFF(n);
STR_LEN(scan) += STR_LEN(n);
next = n + NODE_SZ_STR(n);
/* Now we can overwrite *n : */
Move(STRING(n), STRING(scan) + oldl, STR_LEN(n), char);
#ifdef DEBUGGING
stop = next - 1;
#endif
n = nnext;
if (stopnow) break;
}
#ifdef EXPERIMENTAL_INPLACESCAN
if (flags && !NEXT_OFF(n)) {
DEBUG_PEEP("atch", val, depth, 0);
if (reg_off_by_arg[OP(n)]) {
ARG_SET(n, val - n);
}
else {
NEXT_OFF(n) = val - n;
}
stopnow = 1;
}
#endif
}
/* This temporary node can now be turned into EXACTFU, and must, as
* regexec.c doesn't handle it */
if (OP(scan) == EXACTFU_S_EDGE) {
OP(scan) = EXACTFU;
}
*min_subtract = 0;
*unfolded_multi_char = FALSE;
/* Here, all the adjacent mergeable EXACTish nodes have been merged. We
* can now analyze for sequences of problematic code points. (Prior to
* this final joining, sequences could have been split over boundaries, and
* hence missed). The sequences only happen in folding, hence for any
* non-EXACT EXACTish node */
if (OP(scan) != EXACT && OP(scan) != EXACT_ONLY8 && OP(scan) != EXACTL) {
U8* s0 = (U8*) STRING(scan);
U8* s = s0;
U8* s_end = s0 + STR_LEN(scan);
int total_count_delta = 0; /* Total delta number of characters that
multi-char folds expand to */
/* One pass is made over the node's string looking for all the
* possibilities. To avoid some tests in the loop, there are two main
* cases, for UTF-8 patterns (which can't have EXACTF nodes) and
* non-UTF-8 */
if (UTF) {
U8* folded = NULL;
if (OP(scan) == EXACTFL) {
U8 *d;
/* An EXACTFL node would already have been changed to another
* node type unless there is at least one character in it that
* is problematic; likely a character whose fold definition
* won't be known until runtime, and so has yet to be folded.
* For all but the UTF-8 locale, folds are 1-1 in length, but
* to handle the UTF-8 case, we need to create a temporary
* folded copy using UTF-8 locale rules in order to analyze it.
* This is because our macros that look to see if a sequence is
* a multi-char fold assume everything is folded (otherwise the
* tests in those macros would be too complicated and slow).
* Note that here, the non-problematic folds will have already
* been done, so we can just copy such characters. We actually
* don't completely fold the EXACTFL string. We skip the
* unfolded multi-char folds, as that would just create work
* below to figure out the size they already are */
Newx(folded, UTF8_MAX_FOLD_CHAR_EXPAND * STR_LEN(scan) + 1, U8);
d = folded;
while (s < s_end) {
STRLEN s_len = UTF8SKIP(s);
if (! is_PROBLEMATIC_LOCALE_FOLD_utf8(s)) {
Copy(s, d, s_len, U8);
d += s_len;
}
else if (is_FOLDS_TO_MULTI_utf8(s)) {
*unfolded_multi_char = TRUE;
Copy(s, d, s_len, U8);
d += s_len;
}
else if (isASCII(*s)) {
*(d++) = toFOLD(*s);
}
else {
STRLEN len;
_toFOLD_utf8_flags(s, s_end, d, &len, FOLD_FLAGS_FULL);
d += len;
}
s += s_len;
}
/* Point the remainder of the routine to look at our temporary
* folded copy */
s = folded;
s_end = d;
} /* End of creating folded copy of EXACTFL string */
/* Examine the string for a multi-character fold sequence. UTF-8
* patterns have all characters pre-folded by the time this code is
* executed */
while (s < s_end - 1) /* Can stop 1 before the end, as minimum
length sequence we are looking for is 2 */
{
int count = 0; /* How many characters in a multi-char fold */
int len = is_MULTI_CHAR_FOLD_utf8_safe(s, s_end);
if (! len) { /* Not a multi-char fold: get next char */
s += UTF8SKIP(s);
continue;
}
{ /* Here is a generic multi-char fold. */
U8* multi_end = s + len;
/* Count how many characters are in it. In the case of
* /aa, no folds which contain ASCII code points are
* allowed, so check for those, and skip if found. */
if (OP(scan) != EXACTFAA && OP(scan) != EXACTFAA_NO_TRIE) {
count = utf8_length(s, multi_end);
s = multi_end;
}
else {
while (s < multi_end) {
if (isASCII(*s)) {
s++;
goto next_iteration;
}
else {
s += UTF8SKIP(s);
}
count++;
}
}
}
/* The delta is how long the sequence is minus 1 (1 is how long
* the character that folds to the sequence is) */
total_count_delta += count - 1;
next_iteration: ;
}
/* We created a temporary folded copy of the string in EXACTFL
* nodes. Therefore we need to be sure it doesn't go below zero,
* as the real string could be shorter */
if (OP(scan) == EXACTFL) {
int total_chars = utf8_length((U8*) STRING(scan),
(U8*) STRING(scan) + STR_LEN(scan));
if (total_count_delta > total_chars) {
total_count_delta = total_chars;
}
}
*min_subtract += total_count_delta;
Safefree(folded);
}
else if (OP(scan) == EXACTFAA) {
/* Non-UTF-8 pattern, EXACTFAA node. There can't be a multi-char
* fold to the ASCII range (and there are no existing ones in the
* upper latin1 range). But, as outlined in the comments preceding
* this function, we need to flag any occurrences of the sharp s.
* This character forbids trie formation (because of added
* complexity) */
#if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \
|| (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \
|| UNICODE_DOT_DOT_VERSION > 0)
while (s < s_end) {
if (*s == LATIN_SMALL_LETTER_SHARP_S) {
OP(scan) = EXACTFAA_NO_TRIE;
*unfolded_multi_char = TRUE;
break;
}
s++;
}
}
else {
/* Non-UTF-8 pattern, not EXACTFAA node. Look for the multi-char
* folds that are all Latin1. As explained in the comments
* preceding this function, we look also for the sharp s in EXACTF
* and EXACTFL nodes; it can be in the final position. Otherwise
* we can stop looking 1 byte earlier because have to find at least
* two characters for a multi-fold */
const U8* upper = (OP(scan) == EXACTF || OP(scan) == EXACTFL)
? s_end
: s_end -1;
while (s < upper) {
int len = is_MULTI_CHAR_FOLD_latin1_safe(s, s_end);
if (! len) { /* Not a multi-char fold. */
if (*s == LATIN_SMALL_LETTER_SHARP_S
&& (OP(scan) == EXACTF || OP(scan) == EXACTFL))
{
*unfolded_multi_char = TRUE;
}
s++;
continue;
}
if (len == 2
&& isALPHA_FOLD_EQ(*s, 's')
&& isALPHA_FOLD_EQ(*(s+1), 's'))
{
/* EXACTF nodes need to know that the minimum length
* changed so that a sharp s in the string can match this
* ss in the pattern, but they remain EXACTF nodes, as they
* won't match this unless the target string is is UTF-8,
* which we don't know until runtime. EXACTFL nodes can't
* transform into EXACTFU nodes */
if (OP(scan) != EXACTF && OP(scan) != EXACTFL) {
OP(scan) = EXACTFUP;
}
}
*min_subtract += len - 1;
s += len;
}
#endif
}
if ( STR_LEN(scan) == 1
&& isALPHA_A(* STRING(scan))
&& ( OP(scan) == EXACTFAA
|| ( OP(scan) == EXACTFU
&& ! HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(* STRING(scan)))))
{
U8 mask = ~ ('A' ^ 'a'); /* These differ in just one bit */
/* Replace a length 1 ASCII fold pair node with an ANYOFM node,
* with the mask set to the complement of the bit that differs
* between upper and lower case, and the lowest code point of the
* pair (which the '&' forces) */
OP(scan) = ANYOFM;
ARG_SET(scan, *STRING(scan) & mask);
FLAGS(scan) = mask;
}
}
#ifdef DEBUGGING
/* Allow dumping but overwriting the collection of skipped
* ops and/or strings with fake optimized ops */
n = scan + NODE_SZ_STR(scan);
while (n <= stop) {
OP(n) = OPTIMIZED;
FLAGS(n) = 0;
NEXT_OFF(n) = 0;
n++;
}
#endif
DEBUG_OPTIMISE_r(if (merged){DEBUG_PEEP("finl", scan, depth, 0);});
return stopnow;
}
/* REx optimizer. Converts nodes into quicker variants "in place".
Finds fixed substrings. */
/* Stops at toplevel WHILEM as well as at "last". At end *scanp is set
to the position after last scanned or to NULL. */
#define INIT_AND_WITHP \
assert(!and_withp); \
Newx(and_withp, 1, regnode_ssc); \
SAVEFREEPV(and_withp)
static void
S_unwind_scan_frames(pTHX_ const void *p)
{
scan_frame *f= (scan_frame *)p;
do {
scan_frame *n= f->next_frame;
Safefree(f);
f= n;
} while (f);
}
/* Follow the next-chain of the current node and optimize away
all the NOTHINGs from it.
*/
STATIC void
S_rck_elide_nothing(pTHX_ regnode *node)
{
dVAR;
PERL_ARGS_ASSERT_RCK_ELIDE_NOTHING;
if (OP(node) != CURLYX) {
const int max = (reg_off_by_arg[OP(node)]
? I32_MAX
/* I32 may be smaller than U16 on CRAYs! */
: (I32_MAX < U16_MAX ? I32_MAX : U16_MAX));
int off = (reg_off_by_arg[OP(node)] ? ARG(node) : NEXT_OFF(node));
int noff;
regnode *n = node;
/* Skip NOTHING and LONGJMP. */
while (
(n = regnext(n))
&& (
(PL_regkind[OP(n)] == NOTHING && (noff = NEXT_OFF(n)))
|| ((OP(n) == LONGJMP) && (noff = ARG(n)))
)
&& off + noff < max
) {
off += noff;
}
if (reg_off_by_arg[OP(node)])
ARG(node) = off;
else
NEXT_OFF(node) = off;
}
return;
}
/* the return from this sub is the minimum length that could possibly match */
STATIC SSize_t
S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
SSize_t *minlenp, SSize_t *deltap,
regnode *last,
scan_data_t *data,
I32 stopparen,
U32 recursed_depth,
regnode_ssc *and_withp,
U32 flags, U32 depth, bool was_mutate_ok)
/* scanp: Start here (read-write). */
/* deltap: Write maxlen-minlen here. */
/* last: Stop before this one. */
/* data: string data about the pattern */
/* stopparen: treat close N as END */
/* recursed: which subroutines have we recursed into */
/* and_withp: Valid if flags & SCF_DO_STCLASS_OR */
{
dVAR;
/* There must be at least this number of characters to match */
SSize_t min = 0;
I32 pars = 0, code;
regnode *scan = *scanp, *next;
SSize_t delta = 0;
int is_inf = (flags & SCF_DO_SUBSTR) && (data->flags & SF_IS_INF);
int is_inf_internal = 0; /* The studied chunk is infinite */
I32 is_par = OP(scan) == OPEN ? ARG(scan) : 0;
scan_data_t data_fake;
SV *re_trie_maxbuff = NULL;
regnode *first_non_open = scan;
SSize_t stopmin = SSize_t_MAX;
scan_frame *frame = NULL;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_STUDY_CHUNK;
RExC_study_started= 1;
Zero(&data_fake, 1, scan_data_t);
if ( depth == 0 ) {
while (first_non_open && OP(first_non_open) == OPEN)
first_non_open=regnext(first_non_open);
}
fake_study_recurse:
DEBUG_r(
RExC_study_chunk_recursed_count++;
);
DEBUG_OPTIMISE_MORE_r(
{
Perl_re_indentf( aTHX_ "study_chunk stopparen=%ld recursed_count=%lu depth=%lu recursed_depth=%lu scan=%p last=%p",
depth, (long)stopparen,
(unsigned long)RExC_study_chunk_recursed_count,
(unsigned long)depth, (unsigned long)recursed_depth,
scan,
last);
if (recursed_depth) {
U32 i;
U32 j;
for ( j = 0 ; j < recursed_depth ; j++ ) {
for ( i = 0 ; i < (U32)RExC_total_parens ; i++ ) {
if (
PAREN_TEST(RExC_study_chunk_recursed +
( j * RExC_study_chunk_recursed_bytes), i )
&& (
!j ||
!PAREN_TEST(RExC_study_chunk_recursed +
(( j - 1 ) * RExC_study_chunk_recursed_bytes), i)
)
) {
Perl_re_printf( aTHX_ " %d",(int)i);
break;
}
}
if ( j + 1 < recursed_depth ) {
Perl_re_printf( aTHX_ ",");
}
}
}
Perl_re_printf( aTHX_ "\n");
}
);
while ( scan && OP(scan) != END && scan < last ){
UV min_subtract = 0; /* How mmany chars to subtract from the minimum
node length to get a real minimum (because
the folded version may be shorter) */
bool unfolded_multi_char = FALSE;
/* avoid mutating ops if we are anywhere within the recursed or
* enframed handling for a GOSUB: the outermost level will handle it.
*/
bool mutate_ok = was_mutate_ok && !(frame && frame->in_gosub);
/* Peephole optimizer: */
DEBUG_STUDYDATA("Peep", data, depth, is_inf);
DEBUG_PEEP("Peep", scan, depth, flags);
/* The reason we do this here is that we need to deal with things like
* /(?:f)(?:o)(?:o)/ which cant be dealt with by the normal EXACT
* parsing code, as each (?:..) is handled by a different invocation of
* reg() -- Yves
*/
if (mutate_ok)
JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0);
/* Follow the next-chain of the current node and optimize
away all the NOTHINGs from it.
*/
rck_elide_nothing(scan);
/* The principal pseudo-switch. Cannot be a switch, since we
look into several different things. */
if ( OP(scan) == DEFINEP ) {
SSize_t minlen = 0;
SSize_t deltanext = 0;
SSize_t fake_last_close = 0;
I32 f = SCF_IN_DEFINE;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
scan = regnext(scan);
assert( OP(scan) == IFTHEN );
DEBUG_PEEP("expect IFTHEN", scan, depth, flags);
data_fake.last_closep= &fake_last_close;
minlen = *minlenp;
next = regnext(scan);
scan = NEXTOPER(NEXTOPER(scan));
DEBUG_PEEP("scan", scan, depth, flags);
DEBUG_PEEP("next", next, depth, flags);
/* we suppose the run is continuous, last=next...
* NOTE we dont use the return here! */
/* DEFINEP study_chunk() recursion */
(void)study_chunk(pRExC_state, &scan, &minlen,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1, mutate_ok);
scan = next;
} else
if (
OP(scan) == BRANCH ||
OP(scan) == BRANCHJ ||
OP(scan) == IFTHEN
) {
next = regnext(scan);
code = OP(scan);
/* The op(next)==code check below is to see if we
* have "BRANCH-BRANCH", "BRANCHJ-BRANCHJ", "IFTHEN-IFTHEN"
* IFTHEN is special as it might not appear in pairs.
* Not sure whether BRANCH-BRANCHJ is possible, regardless
* we dont handle it cleanly. */
if (OP(next) == code || code == IFTHEN) {
/* NOTE - There is similar code to this block below for
* handling TRIE nodes on a re-study. If you change stuff here
* check there too. */
SSize_t max1 = 0, min1 = SSize_t_MAX, num = 0;
regnode_ssc accum;
regnode * const startbranch=scan;
if (flags & SCF_DO_SUBSTR) {
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
while (OP(scan) == code) {
SSize_t deltanext, minnext, fake;
I32 f = 0;
regnode_ssc this_class;
DEBUG_PEEP("Branch", scan, depth, flags);
num++;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
next = regnext(scan);
scan = NEXTOPER(scan); /* everything */
if (code != BRANCH) /* everything but BRANCH */
scan = NEXTOPER(scan);
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
/* we suppose the run is continuous, last=next...*/
/* recurse study_chunk() for each BRANCH in an alternation */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1,
mutate_ok);
if (min1 > minnext)
min1 = minnext;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < minnext + deltanext)
max1 = minnext + deltanext;
scan = next;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > minnext)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass*)&this_class);
}
if (code == IFTHEN && num < 2) /* Empty ELSE branch */
min1 = 0;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
if (data->pos_delta >= SSize_t_MAX - (max1 - min1))
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1;
}
min += min1;
if (delta == SSize_t_MAX
|| SSize_t_MAX - delta - (max1 - min1) < 0)
delta = SSize_t_MAX;
else
delta += max1 - min1;
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass*) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
if (PERL_ENABLE_TRIE_OPTIMISATION
&& OP(startbranch) == BRANCH
&& mutate_ok
) {
/* demq.
Assuming this was/is a branch we are dealing with: 'scan'
now points at the item that follows the branch sequence,
whatever it is. We now start at the beginning of the
sequence and look for subsequences of
BRANCH->EXACT=>x1
BRANCH->EXACT=>x2
tail
which would be constructed from a pattern like
/A|LIST|OF|WORDS/
If we can find such a subsequence we need to turn the first
element into a trie and then add the subsequent branch exact
strings to the trie.
We have two cases
1. patterns where the whole set of branches can be
converted.
2. patterns where only a subset can be converted.
In case 1 we can replace the whole set with a single regop
for the trie. In case 2 we need to keep the start and end
branches so
'BRANCH EXACT; BRANCH EXACT; BRANCH X'
becomes BRANCH TRIE; BRANCH X;
There is an additional case, that being where there is a
common prefix, which gets split out into an EXACT like node
preceding the TRIE node.
If x(1..n)==tail then we can do a simple trie, if not we make
a "jump" trie, such that when we match the appropriate word
we "jump" to the appropriate tail node. Essentially we turn
a nested if into a case structure of sorts.
*/
int made=0;
if (!re_trie_maxbuff) {
re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1);
if (!SvIOK(re_trie_maxbuff))
sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT);
}
if ( SvIV(re_trie_maxbuff)>=0 ) {
regnode *cur;
regnode *first = (regnode *)NULL;
regnode *last = (regnode *)NULL;
regnode *tail = scan;
U8 trietype = 0;
U32 count=0;
/* var tail is used because there may be a TAIL
regop in the way. Ie, the exacts will point to the
thing following the TAIL, but the last branch will
point at the TAIL. So we advance tail. If we
have nested (?:) we may have to move through several
tails.
*/
while ( OP( tail ) == TAIL ) {
/* this is the TAIL generated by (?:) */
tail = regnext( tail );
}
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, tail, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "%s %" UVuf ":%s\n",
depth+1,
"Looking for TRIE'able sequences. Tail node is ",
(UV) REGNODE_OFFSET(tail),
SvPV_nolen_const( RExC_mysv )
);
});
/*
Step through the branches
cur represents each branch,
noper is the first thing to be matched as part
of that branch
noper_next is the regnext() of that node.
We normally handle a case like this
/FOO[xyz]|BAR[pqr]/ via a "jump trie" but we also
support building with NOJUMPTRIE, which restricts
the trie logic to structures like /FOO|BAR/.
If noper is a trieable nodetype then the branch is
a possible optimization target. If we are building
under NOJUMPTRIE then we require that noper_next is
the same as scan (our current position in the regex
program).
Once we have two or more consecutive such branches
we can create a trie of the EXACT's contents and
stitch it in place into the program.
If the sequence represents all of the branches in
the alternation we replace the entire thing with a
single TRIE node.
Otherwise when it is a subsequence we need to
stitch it in place and replace only the relevant
branches. This means the first branch has to remain
as it is used by the alternation logic, and its
next pointer, and needs to be repointed at the item
on the branch chain following the last branch we
have optimized away.
This could be either a BRANCH, in which case the
subsequence is internal, or it could be the item
following the branch sequence in which case the
subsequence is at the end (which does not
necessarily mean the first node is the start of the
alternation).
TRIE_TYPE(X) is a define which maps the optype to a
trietype.
optype | trietype
----------------+-----------
NOTHING | NOTHING
EXACT | EXACT
EXACT_ONLY8 | EXACT
EXACTFU | EXACTFU
EXACTFU_ONLY8 | EXACTFU
EXACTFUP | EXACTFU
EXACTFAA | EXACTFAA
EXACTL | EXACTL
EXACTFLU8 | EXACTFLU8
*/
#define TRIE_TYPE(X) ( ( NOTHING == (X) ) \
? NOTHING \
: ( EXACT == (X) || EXACT_ONLY8 == (X) ) \
? EXACT \
: ( EXACTFU == (X) \
|| EXACTFU_ONLY8 == (X) \
|| EXACTFUP == (X) ) \
? EXACTFU \
: ( EXACTFAA == (X) ) \
? EXACTFAA \
: ( EXACTL == (X) ) \
? EXACTL \
: ( EXACTFLU8 == (X) ) \
? EXACTFLU8 \
: 0 )
/* dont use tail as the end marker for this traverse */
for ( cur = startbranch ; cur != scan ; cur = regnext( cur ) ) {
regnode * const noper = NEXTOPER( cur );
U8 noper_type = OP( noper );
U8 noper_trietype = TRIE_TYPE( noper_type );
#if defined(DEBUGGING) || defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = (noper_next && noper_next < tail) ? TRIE_TYPE( noper_next_type ) :0;
#endif
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %d:%s (%d)",
depth+1,
REG_NODE_NUM(cur), SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur) );
regprop(RExC_rx, RExC_mysv, noper, NULL, pRExC_state);
Perl_re_printf( aTHX_ " -> %d:%s",
REG_NODE_NUM(noper), SvPV_nolen_const(RExC_mysv));
if ( noper_next ) {
regprop(RExC_rx, RExC_mysv, noper_next, NULL, pRExC_state);
Perl_re_printf( aTHX_ "\t=> %d:%s\t",
REG_NODE_NUM(noper_next), SvPV_nolen_const(RExC_mysv));
}
Perl_re_printf( aTHX_ "(First==%d,Last==%d,Cur==%d,tt==%s,ntt==%s,nntt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype], PL_reg_name[noper_trietype], PL_reg_name[noper_next_trietype]
);
});
/* Is noper a trieable nodetype that can be merged
* with the current trie (if there is one)? */
if ( noper_trietype
&&
(
( noper_trietype == NOTHING )
|| ( trietype == NOTHING )
|| ( trietype == noper_trietype )
)
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
&& count < U16_MAX)
{
/* Handle mergable triable node Either we are
* the first node in a new trieable sequence,
* in which case we do some bookkeeping,
* otherwise we update the end pointer. */
if ( !first ) {
first = cur;
if ( noper_trietype == NOTHING ) {
#if !defined(DEBUGGING) && !defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = noper_next_type ? TRIE_TYPE( noper_next_type ) :0;
#endif
if ( noper_next_trietype ) {
trietype = noper_next_trietype;
} else if (noper_next_type) {
/* a NOTHING regop is 1 regop wide.
* We need at least two for a trie
* so we can't merge this in */
first = NULL;
}
} else {
trietype = noper_trietype;
}
} else {
if ( trietype == NOTHING )
trietype = noper_trietype;
last = cur;
}
if (first)
count++;
} /* end handle mergable triable node */
else {
/* handle unmergable node -
* noper may either be a triable node which can
* not be tried together with the current trie,
* or a non triable node */
if ( last ) {
/* If last is set and trietype is not
* NOTHING then we have found at least two
* triable branch sequences in a row of a
* similar trietype so we can turn them
* into a trie. If/when we allow NOTHING to
* start a trie sequence this condition
* will be required, and it isn't expensive
* so we leave it in for now. */
if ( trietype && trietype != NOTHING )
make_trie( pRExC_state,
startbranch, first, cur, tail,
count, trietype, depth+1 );
last = NULL; /* note: we clear/update
first, trietype etc below,
so we dont do it here */
}
if ( noper_trietype
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
){
/* noper is triable, so we can start a new
* trie sequence */
count = 1;
first = cur;
trietype = noper_trietype;
} else if (first) {
/* if we already saw a first but the
* current node is not triable then we have
* to reset the first information. */
count = 0;
first = NULL;
trietype = 0;
}
} /* end handle unmergable node */
} /* loop over branches */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <SCAN FINISHED> ",
depth+1, SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
Perl_re_printf( aTHX_ "(First==%d, Last==%d, Cur==%d, tt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype]
);
});
if ( last && trietype ) {
if ( trietype != NOTHING ) {
/* the last branch of the sequence was part of
* a trie, so we have to construct it here
* outside of the loop */
made= make_trie( pRExC_state, startbranch,
first, scan, tail, count,
trietype, depth+1 );
#ifdef TRIE_STUDY_OPT
if ( ((made == MADE_EXACT_TRIE &&
startbranch == first)
|| ( first_non_open == first )) &&
depth==0 ) {
flags |= SCF_TRIE_RESTUDY;
if ( startbranch == first
&& scan >= tail )
{
RExC_seen &=~REG_TOP_LEVEL_BRANCHES_SEEN;
}
}
#endif
} else {
/* at this point we know whatever we have is a
* NOTHING sequence/branch AND if 'startbranch'
* is 'first' then we can turn the whole thing
* into a NOTHING
*/
if ( startbranch == first ) {
regnode *opt;
/* the entire thing is a NOTHING sequence,
* something like this: (?:|) So we can
* turn it into a plain NOTHING op. */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <NOTHING BRANCH SEQUENCE>\n",
depth+1,
SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
});
OP(startbranch)= NOTHING;
NEXT_OFF(startbranch)= tail - startbranch;
for ( opt= startbranch + 1; opt < tail ; opt++ )
OP(opt)= OPTIMIZED;
}
}
} /* end if ( last) */
} /* TRIE_MAXBUF is non zero */
} /* do trie */
}
else if ( code == BRANCHJ ) { /* single branch is optimized. */
scan = NEXTOPER(NEXTOPER(scan));
} else /* single branch is optimized. */
scan = NEXTOPER(scan);
continue;
} else if (OP(scan) == SUSPEND || OP(scan) == GOSUB) {
I32 paren = 0;
regnode *start = NULL;
regnode *end = NULL;
U32 my_recursed_depth= recursed_depth;
if (OP(scan) != SUSPEND) { /* GOSUB */
/* Do setup, note this code has side effects beyond
* the rest of this block. Specifically setting
* RExC_recurse[] must happen at least once during
* study_chunk(). */
paren = ARG(scan);
RExC_recurse[ARG2L(scan)] = scan;
start = REGNODE_p(RExC_open_parens[paren]);
end = REGNODE_p(RExC_close_parens[paren]);
/* NOTE we MUST always execute the above code, even
* if we do nothing with a GOSUB */
if (
( flags & SCF_IN_DEFINE )
||
(
(is_inf_internal || is_inf || (data && data->flags & SF_IS_INF))
&&
( (flags & (SCF_DO_STCLASS | SCF_DO_SUBSTR)) == 0 )
)
) {
/* no need to do anything here if we are in a define. */
/* or we are after some kind of infinite construct
* so we can skip recursing into this item.
* Since it is infinite we will not change the maxlen
* or delta, and if we miss something that might raise
* the minlen it will merely pessimise a little.
*
* Iow /(?(DEFINE)(?<foo>foo|food))a+(?&foo)/
* might result in a minlen of 1 and not of 4,
* but this doesn't make us mismatch, just try a bit
* harder than we should.
* */
scan= regnext(scan);
continue;
}
if (
!recursed_depth
||
!PAREN_TEST(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), paren)
) {
/* it is quite possible that there are more efficient ways
* to do this. We maintain a bitmap per level of recursion
* of which patterns we have entered so we can detect if a
* pattern creates a possible infinite loop. When we
* recurse down a level we copy the previous levels bitmap
* down. When we are at recursion level 0 we zero the top
* level bitmap. It would be nice to implement a different
* more efficient way of doing this. In particular the top
* level bitmap may be unnecessary.
*/
if (!recursed_depth) {
Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes, U8);
} else {
Copy(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed_bytes, U8);
}
/* we havent recursed into this paren yet, so recurse into it */
DEBUG_STUDYDATA("gosub-set", data, depth, is_inf);
PAREN_SET(RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), paren);
my_recursed_depth= recursed_depth + 1;
} else {
DEBUG_STUDYDATA("gosub-inf", data, depth, is_inf);
/* some form of infinite recursion, assume infinite length
* */
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1;
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
start= NULL; /* reset start so we dont recurse later on. */
}
} else {
paren = stopparen;
start = scan + 2;
end = regnext(scan);
}
if (start) {
scan_frame *newframe;
assert(end);
if (!RExC_frame_last) {
Newxz(newframe, 1, scan_frame);
SAVEDESTRUCTOR_X(S_unwind_scan_frames, newframe);
RExC_frame_head= newframe;
RExC_frame_count++;
} else if (!RExC_frame_last->next_frame) {
Newxz(newframe, 1, scan_frame);
RExC_frame_last->next_frame= newframe;
newframe->prev_frame= RExC_frame_last;
RExC_frame_count++;
} else {
newframe= RExC_frame_last->next_frame;
}
RExC_frame_last= newframe;
newframe->next_regnode = regnext(scan);
newframe->last_regnode = last;
newframe->stopparen = stopparen;
newframe->prev_recursed_depth = recursed_depth;
newframe->this_prev_frame= frame;
newframe->in_gosub = (
(frame && frame->in_gosub) || OP(scan) == GOSUB
);
DEBUG_STUDYDATA("frame-new", data, depth, is_inf);
DEBUG_PEEP("fnew", scan, depth, flags);
frame = newframe;
scan = start;
stopparen = paren;
last = end;
depth = depth + 1;
recursed_depth= my_recursed_depth;
continue;
}
}
else if ( OP(scan) == EXACT
|| OP(scan) == EXACT_ONLY8
|| OP(scan) == EXACTL)
{
SSize_t l = STR_LEN(scan);
UV uc;
assert(l);
if (UTF) {
const U8 * const s = (U8*)STRING(scan);
uc = utf8_to_uvchr_buf(s, s + l, NULL);
l = utf8_length(s, s + l);
} else {
uc = *((U8*)STRING(scan));
}
min += l;
if (flags & SCF_DO_SUBSTR) { /* Update longest substr. */
/* The code below prefers earlier match for fixed
offset, later match for variable offset. */
if (data->last_end == -1) { /* Update the start info. */
data->last_start_min = data->pos_min;
data->last_start_max = is_inf
? SSize_t_MAX : data->pos_min + data->pos_delta;
}
sv_catpvn(data->last_found, STRING(scan), STR_LEN(scan));
if (UTF)
SvUTF8_on(data->last_found);
{
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += utf8_length((U8*)STRING(scan),
(U8*)STRING(scan)+STR_LEN(scan));
}
data->last_end = data->pos_min + l;
data->pos_min += l; /* As in the first entry. */
data->flags &= ~SF_BEFORE_EOL;
}
/* ANDing the code point leaves at most it, and not in locale, and
* can't match null string */
if (flags & SCF_DO_STCLASS_AND) {
ssc_cp_and(data->start_class, uc);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ssc_clear_locale(data->start_class);
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_add_cp(data->start_class, uc);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
else if (PL_regkind[OP(scan)] == EXACT) {
/* But OP != EXACT!, so is EXACTFish */
SSize_t l = STR_LEN(scan);
const U8 * s = (U8*)STRING(scan);
/* Search for fixed substrings supports EXACT only. */
if (flags & SCF_DO_SUBSTR) {
assert(data);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (UTF) {
l = utf8_length(s, s + l);
}
if (unfolded_multi_char) {
RExC_seen |= REG_UNFOLDED_MULTI_SEEN;
}
min += l - min_subtract;
assert (min >= 0);
delta += min_subtract;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += l - min_subtract;
if (data->pos_min < 0) {
data->pos_min = 0;
}
data->pos_delta += min_subtract;
if (min_subtract) {
data->cur_is_floating = 1; /* float */
}
}
if (flags & SCF_DO_STCLASS) {
SV* EXACTF_invlist = _make_exactf_invlist(pRExC_state, scan);
assert(EXACTF_invlist);
if (flags & SCF_DO_STCLASS_AND) {
if (OP(scan) != EXACTFL)
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ANYOF_POSIXL_ZERO(data->start_class);
ssc_intersection(data->start_class, EXACTF_invlist, FALSE);
}
else { /* SCF_DO_STCLASS_OR */
ssc_union(data->start_class, EXACTF_invlist, FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
SvREFCNT_dec(EXACTF_invlist);
}
}
else if (REGNODE_VARIES(OP(scan))) {
SSize_t mincount, maxcount, minnext, deltanext, pos_before = 0;
I32 fl = 0, f = flags;
regnode * const oscan = scan;
regnode_ssc this_class;
regnode_ssc *oclass = NULL;
I32 next_is_eval = 0;
switch (PL_regkind[OP(scan)]) {
case WHILEM: /* End of (?:...)* . */
scan = NEXTOPER(scan);
goto finish;
case PLUS:
if (flags & (SCF_DO_SUBSTR | SCF_DO_STCLASS)) {
next = NEXTOPER(scan);
if ( OP(next) == EXACT
|| OP(next) == EXACT_ONLY8
|| OP(next) == EXACTL
|| (flags & SCF_DO_STCLASS))
{
mincount = 1;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
}
if (flags & SCF_DO_SUBSTR)
data->pos_min++;
min++;
/* FALLTHROUGH */
case STAR:
next = NEXTOPER(scan);
/* This temporary node can now be turned into EXACTFU, and
* must, as regexec.c doesn't handle it */
if (OP(next) == EXACTFU_S_EDGE && mutate_ok) {
OP(next) = EXACTFU;
}
if ( STR_LEN(next) == 1
&& isALPHA_A(* STRING(next))
&& ( OP(next) == EXACTFAA
|| ( OP(next) == EXACTFU
&& ! HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(* STRING(next))))
&& mutate_ok
) {
/* These differ in just one bit */
U8 mask = ~ ('A' ^ 'a');
assert(isALPHA_A(* STRING(next)));
/* Then replace it by an ANYOFM node, with
* the mask set to the complement of the
* bit that differs between upper and lower
* case, and the lowest code point of the
* pair (which the '&' forces) */
OP(next) = ANYOFM;
ARG_SET(next, *STRING(next) & mask);
FLAGS(next) = mask;
}
if (flags & SCF_DO_STCLASS) {
mincount = 0;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
scan = regnext(scan);
goto optimize_curly_tail;
case CURLY:
if (stopparen>0 && (OP(scan)==CURLYN || OP(scan)==CURLYM)
&& (scan->flags == stopparen))
{
mincount = 1;
maxcount = 1;
} else {
mincount = ARG1(scan);
maxcount = ARG2(scan);
}
next = regnext(scan);
if (OP(scan) == CURLYX) {
I32 lp = (data ? *(data->last_closep) : 0);
scan->flags = ((lp <= (I32)U8_MAX) ? (U8)lp : U8_MAX);
}
scan = NEXTOPER(scan) + EXTRA_STEP_2ARGS;
next_is_eval = (OP(scan) == EVAL);
do_curly:
if (flags & SCF_DO_SUBSTR) {
if (mincount == 0)
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
pos_before = data->pos_min;
}
if (data) {
fl = data->flags;
data->flags &= ~(SF_HAS_PAR|SF_IN_PAR|SF_HAS_EVAL);
if (is_inf)
data->flags |= SF_IS_INF;
}
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
oclass = data->start_class;
data->start_class = &this_class;
f |= SCF_DO_STCLASS_AND;
f &= ~SCF_DO_STCLASS_OR;
}
/* Exclude from super-linear cache processing any {n,m}
regops for which the combination of input pos and regex
pos is not enough information to determine if a match
will be possible.
For example, in the regex /foo(bar\s*){4,8}baz/ with the
regex pos at the \s*, the prospects for a match depend not
only on the input position but also on how many (bar\s*)
repeats into the {4,8} we are. */
if ((mincount > 1) || (maxcount > 1 && maxcount != REG_INFTY))
f &= ~SCF_WHILEM_VISITED_POS;
/* This will finish on WHILEM, setting scan, or on NULL: */
/* recurse study_chunk() on loop bodies */
minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext,
last, data, stopparen, recursed_depth, NULL,
(mincount == 0
? (f & ~SCF_DO_SUBSTR)
: f)
, depth+1, mutate_ok);
if (flags & SCF_DO_STCLASS)
data->start_class = oclass;
if (mincount == 0 || minnext == 0) {
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
}
else if (flags & SCF_DO_STCLASS_AND) {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&this_class, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
} else { /* Non-zero len */
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
}
else if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
flags &= ~SCF_DO_STCLASS;
}
if (!scan) /* It was not CURLYX, but CURLY. */
scan = next;
if (((flags & (SCF_TRIE_DOING_RESTUDY|SCF_DO_SUBSTR))==SCF_DO_SUBSTR)
/* ? quantifier ok, except for (?{ ... }) */
&& (next_is_eval || !(mincount == 0 && maxcount == 1))
&& (minnext == 0) && (deltanext == 0)
&& data && !(data->flags & (SF_HAS_PAR|SF_IN_PAR))
&& maxcount <= REG_INFTY/3) /* Complement check for big
count */
{
_WARN_HELPER(RExC_precomp_end, packWARN(WARN_REGEXP),
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP),
"Quantifier unexpected on zero-length expression "
"in regex m/%" UTF8f "/",
UTF8fARG(UTF, RExC_precomp_end - RExC_precomp,
RExC_precomp)));
}
if ( ( minnext > 0 && mincount >= SSize_t_MAX / minnext )
|| min >= SSize_t_MAX - minnext * mincount )
{
FAIL("Regexp out of space");
}
min += minnext * mincount;
is_inf_internal |= deltanext == SSize_t_MAX
|| (maxcount == REG_INFTY && minnext + deltanext > 0);
is_inf |= is_inf_internal;
if (is_inf) {
delta = SSize_t_MAX;
} else {
delta += (minnext + deltanext) * maxcount
- minnext * mincount;
}
/* Try powerful optimization CURLYX => CURLYN. */
if ( OP(oscan) == CURLYX && data
&& data->flags & SF_IN_PAR
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext && minnext == 1
&& mutate_ok
) {
/* Try to optimize to CURLYN. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS;
regnode * const nxt1 = nxt;
#ifdef DEBUGGING
regnode *nxt2;
#endif
/* Skip open. */
nxt = regnext(nxt);
if (!REGNODE_SIMPLE(OP(nxt))
&& !(PL_regkind[OP(nxt)] == EXACT
&& STR_LEN(nxt) == 1))
goto nogo;
#ifdef DEBUGGING
nxt2 = nxt;
#endif
nxt = regnext(nxt);
if (OP(nxt) != CLOSE)
goto nogo;
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->while*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt) + 2;
}
/* Now we know that nxt2 is the only contents: */
oscan->flags = (U8)ARG(nxt);
OP(oscan) = CURLYN;
OP(nxt1) = NOTHING; /* was OPEN. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1+ 1) = 0; /* just for consistency. */
NEXT_OFF(nxt2) = 0; /* just for consistency with CURLY. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt+ 1) = 0; /* just for consistency. */
#endif
}
nogo:
/* Try optimization CURLYX => CURLYM. */
if ( OP(oscan) == CURLYX && data
&& !(data->flags & SF_HAS_PAR)
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext /* atom is fixed width */
&& minnext != 0 /* CURLYM can't handle zero width */
/* Nor characters whose fold at run-time may be
* multi-character */
&& ! (RExC_seen & REG_UNFOLDED_MULTI_SEEN)
&& mutate_ok
) {
/* XXXX How to optimize if data == 0? */
/* Optimize to a simpler form. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN */
regnode *nxt2;
OP(oscan) = CURLYM;
while ( (nxt2 = regnext(nxt)) /* skip over embedded stuff*/
&& (OP(nxt2) != WHILEM))
nxt = nxt2;
OP(nxt2) = SUCCEED; /* Whas WHILEM */
/* Need to optimize away parenths. */
if ((data->flags & SF_IN_PAR) && OP(nxt) == CLOSE) {
/* Set the parenth number. */
regnode *nxt1 = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN*/
oscan->flags = (U8)ARG(nxt);
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->NOTHING*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt2)
+ 1;
}
OP(nxt1) = OPTIMIZED; /* was OPEN. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1 + 1) = 0; /* just for consistency. */
NEXT_OFF(nxt + 1) = 0; /* just for consistency. */
#endif
#if 0
while ( nxt1 && (OP(nxt1) != WHILEM)) {
regnode *nnxt = regnext(nxt1);
if (nnxt == nxt) {
if (reg_off_by_arg[OP(nxt1)])
ARG_SET(nxt1, nxt2 - nxt1);
else if (nxt2 - nxt1 < U16_MAX)
NEXT_OFF(nxt1) = nxt2 - nxt1;
else
OP(nxt) = NOTHING; /* Cannot beautify */
}
nxt1 = nnxt;
}
#endif
/* Optimize again: */
/* recurse study_chunk() on optimised CURLYX => CURLYM */
study_chunk(pRExC_state, &nxt1, minlenp, &deltanext, nxt,
NULL, stopparen, recursed_depth, NULL, 0,
depth+1, mutate_ok);
}
else
oscan->flags = 0;
}
else if ((OP(oscan) == CURLYX)
&& (flags & SCF_WHILEM_VISITED_POS)
/* See the comment on a similar expression above.
However, this time it's not a subexpression
we care about, but the expression itself. */
&& (maxcount == REG_INFTY)
&& data) {
/* This stays as CURLYX, we can put the count/of pair. */
/* Find WHILEM (as in regexec.c) */
regnode *nxt = oscan + NEXT_OFF(oscan);
if (OP(PREVOPER(nxt)) == NOTHING) /* LONGJMP */
nxt += ARG(nxt);
nxt = PREVOPER(nxt);
if (nxt->flags & 0xf) {
/* we've already set whilem count on this node */
} else if (++data->whilem_c < 16) {
assert(data->whilem_c <= RExC_whilem_seen);
nxt->flags = (U8)(data->whilem_c
| (RExC_whilem_seen << 4)); /* On WHILEM */
}
}
if (data && fl & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (flags & SCF_DO_SUBSTR) {
SV *last_str = NULL;
STRLEN last_chrs = 0;
int counted = mincount != 0;
if (data->last_end > 0 && mincount != 0) { /* Ends with a
string. */
SSize_t b = pos_before >= data->last_start_min
? pos_before : data->last_start_min;
STRLEN l;
const char * const s = SvPV_const(data->last_found, l);
SSize_t old = b - data->last_start_min;
assert(old >= 0);
if (UTF)
old = utf8_hop_forward((U8*)s, old,
(U8 *) SvEND(data->last_found))
- (U8*)s;
l -= old;
/* Get the added string: */
last_str = newSVpvn_utf8(s + old, l, UTF);
last_chrs = UTF ? utf8_length((U8*)(s + old),
(U8*)(s + old + l)) : l;
if (deltanext == 0 && pos_before == b) {
/* What was added is a constant string */
if (mincount > 1) {
SvGROW(last_str, (mincount * l) + 1);
repeatcpy(SvPVX(last_str) + l,
SvPVX_const(last_str), l,
mincount - 1);
SvCUR_set(last_str, SvCUR(last_str) * mincount);
/* Add additional parts. */
SvCUR_set(data->last_found,
SvCUR(data->last_found) - l);
sv_catsv(data->last_found, last_str);
{
SV * sv = data->last_found;
MAGIC *mg =
SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += last_chrs * (mincount-1);
}
last_chrs *= mincount;
data->last_end += l * (mincount - 1);
}
} else {
/* start offset must point into the last copy */
data->last_start_min += minnext * (mincount - 1);
data->last_start_max =
is_inf
? SSize_t_MAX
: data->last_start_max +
(maxcount - 1) * (minnext + data->pos_delta);
}
}
/* It is counted once already... */
data->pos_min += minnext * (mincount - counted);
#if 0
Perl_re_printf( aTHX_ "counted=%" UVuf " deltanext=%" UVuf
" SSize_t_MAX=%" UVuf " minnext=%" UVuf
" maxcount=%" UVuf " mincount=%" UVuf "\n",
(UV)counted, (UV)deltanext, (UV)SSize_t_MAX, (UV)minnext, (UV)maxcount,
(UV)mincount);
if (deltanext != SSize_t_MAX)
Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n",
(UV)(-counted * deltanext + (minnext + deltanext) * maxcount
- minnext * mincount), (UV)(SSize_t_MAX - data->pos_delta));
#endif
if (deltanext == SSize_t_MAX
|| -counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount >= SSize_t_MAX - data->pos_delta)
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += - counted * deltanext +
(minnext + deltanext) * maxcount - minnext * mincount;
if (mincount != maxcount) {
/* Cannot extend fixed substrings found inside
the group. */
scan_commit(pRExC_state, data, minlenp, is_inf);
if (mincount && last_str) {
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg)
mg->mg_len = -1;
sv_setsv(sv, last_str);
data->last_end = data->pos_min;
data->last_start_min = data->pos_min - last_chrs;
data->last_start_max = is_inf
? SSize_t_MAX
: data->pos_min + data->pos_delta - last_chrs;
}
data->cur_is_floating = 1; /* float */
}
SvREFCNT_dec(last_str);
}
if (data && (fl & SF_HAS_EVAL))
data->flags |= SF_HAS_EVAL;
optimize_curly_tail:
rck_elide_nothing(oscan);
continue;
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected varying REx opcode %d",
OP(scan));
#endif
case REF:
case CLUMP:
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) {
if (OP(scan) == CLUMP) {
/* Actually is any start char, but very few code points
* aren't start characters */
ssc_match_all_cp(data->start_class);
}
else {
ssc_anything(data->start_class);
}
}
flags &= ~SCF_DO_STCLASS;
break;
}
}
else if (OP(scan) == LNBREAK) {
if (flags & SCF_DO_STCLASS) {
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE], FALSE);
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE],
FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg for
* 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
min++;
if (delta != SSize_t_MAX)
delta++; /* Because of the 2 char string cr-lf */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += 1;
if (data->pos_delta != SSize_t_MAX) {
data->pos_delta += 1;
}
data->cur_is_floating = 1; /* float */
}
}
else if (REGNODE_SIMPLE(OP(scan))) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min++;
}
min++;
if (flags & SCF_DO_STCLASS) {
bool invert = 0;
SV* my_invlist = NULL;
U8 namedclass;
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
/* Some of the logic below assumes that switching
locale on will only add false positives. */
switch (OP(scan)) {
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected simple REx opcode %d",
OP(scan));
#endif
case SANY:
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_match_all_cp(data->start_class);
break;
case REG_ANY:
{
SV* REG_ANY_invlist = _new_invlist(2);
REG_ANY_invlist = add_cp_to_invlist(REG_ANY_invlist,
'\n');
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert, hence all but \n
*/
);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert */
);
ssc_clear_locale(data->start_class);
}
SvREFCNT_dec_NN(REG_ANY_invlist);
}
break;
case ANYOFD:
case ANYOFL:
case ANYOFPOSIXL:
case ANYOFH:
case ANYOF:
if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class,
(regnode_charclass *) scan);
else
ssc_or(pRExC_state, data->start_class,
(regnode_charclass *) scan);
break;
case NANYOFM:
case ANYOFM:
{
SV* cp_list = get_ANYOFM_contents(scan);
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class, cp_list, invert);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, cp_list, invert);
}
SvREFCNT_dec_NN(cp_list);
break;
}
case NPOSIXL:
invert = 1;
/* FALLTHROUGH */
case POSIXL:
namedclass = classnum_to_namedclass(FLAGS(scan)) + invert;
if (flags & SCF_DO_STCLASS_AND) {
bool was_there = cBOOL(
ANYOF_POSIXL_TEST(data->start_class,
namedclass));
ANYOF_POSIXL_ZERO(data->start_class);
if (was_there) { /* Do an AND */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
/* No individual code points can now match */
data->start_class->invlist
= sv_2mortal(_new_invlist(0));
}
else {
int complement = namedclass + ((invert) ? -1 : 1);
assert(flags & SCF_DO_STCLASS_OR);
/* If the complement of this class was already there,
* the result is that they match all code points,
* (\d + \D == everything). Remove the classes from
* future consideration. Locale is not relevant in
* this case */
if (ANYOF_POSIXL_TEST(data->start_class, complement)) {
ssc_match_all_cp(data->start_class);
ANYOF_POSIXL_CLEAR(data->start_class, namedclass);
ANYOF_POSIXL_CLEAR(data->start_class, complement);
}
else { /* The usual case; just add this class to the
existing set */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
}
break;
case NPOSIXA: /* For these, we always know the exact set of
what's matched */
invert = 1;
/* FALLTHROUGH */
case POSIXA:
my_invlist = invlist_clone(PL_Posix_ptrs[FLAGS(scan)], NULL);
goto join_posix_and_ascii;
case NPOSIXD:
case NPOSIXU:
invert = 1;
/* FALLTHROUGH */
case POSIXD:
case POSIXU:
my_invlist = invlist_clone(PL_XPosix_ptrs[FLAGS(scan)], NULL);
/* NPOSIXD matches all upper Latin1 code points unless the
* target string being matched is UTF-8, which is
* unknowable until match time. Since we are going to
* invert, we want to get rid of all of them so that the
* inversion will match all */
if (OP(scan) == NPOSIXD) {
_invlist_subtract(my_invlist, PL_UpperLatin1,
&my_invlist);
}
join_posix_and_ascii:
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, my_invlist, invert);
ssc_clear_locale(data->start_class);
}
else {
assert(flags & SCF_DO_STCLASS_OR);
ssc_union(data->start_class, my_invlist, invert);
}
SvREFCNT_dec(my_invlist);
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (PL_regkind[OP(scan)] == EOL && flags & SCF_DO_SUBSTR) {
data->flags |= (OP(scan) == MEOL
? SF_BEFORE_MEOL
: SF_BEFORE_SEOL);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
else if ( PL_regkind[OP(scan)] == BRANCHJ
/* Lookbehind, or need to calculate parens/evals/stclass: */
&& (scan->flags || data || (flags & SCF_DO_STCLASS))
&& (OP(scan) == IFMATCH || OP(scan) == UNLESSM))
{
if ( !PERL_ENABLE_POSITIVE_ASSERTION_STUDY
|| OP(scan) == UNLESSM )
{
/* Negative Lookahead/lookbehind
In this case we can't do fixed string optimisation.
*/
SSize_t deltanext, minnext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* recurse study_chunk() for lookahead body */
minnext = study_chunk(pRExC_state, &nscan, minlenp, &deltanext,
last, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1,
mutate_ok);
if (scan->flags) {
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| minnext > (I32)U8_MAX
|| minnext + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
/* The 'next_off' field has been repurposed to count the
* additional starting positions to try beyond the initial
* one. (This leaves it at 0 for non-variable length
* matches to avoid breakage for those not using this
* extension) */
if (deltanext) {
scan->next_off = deltanext;
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__VLB,
"Variable length lookbehind is experimental");
}
scan->flags = (U8)minnext + deltanext;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (f & SCF_DO_STCLASS_AND) {
if (flags & SCF_DO_STCLASS_OR) {
/* OR before, AND after: ideally we would recurse with
* data_fake to get the AND applied by study of the
* remainder of the pattern, and then derecurse;
* *** HACK *** for now just treat as "no information".
* See [perl #56690].
*/
ssc_init(pRExC_state, data->start_class);
} else {
/* AND before and after: combine and continue. These
* assertions are zero-length, so can match an EMPTY
* string */
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
}
}
#if PERL_ENABLE_POSITIVE_ASSERTION_STUDY
else {
/* Positive Lookahead/lookbehind
In this case we can do fixed string optimisation,
but we must be careful about it. Note in the case of
lookbehind the positions will be offset by the minimum
length of the pattern, something we won't know about
until after the recurse.
*/
SSize_t deltanext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
/* We use SAVEFREEPV so that when the full compile
is finished perl will clean up the allocated
minlens when it's all done. This way we don't
have to worry about freeing them when we know
they wont be used, which would be a pain.
*/
SSize_t *minnextp;
Newx( minnextp, 1, SSize_t );
SAVEFREEPV(minnextp);
if (data) {
StructCopy(data, &data_fake, scan_data_t);
if ((flags & SCF_DO_SUBSTR) && data->last_found) {
f |= SCF_DO_SUBSTR;
if (scan->flags)
scan_commit(pRExC_state, &data_fake, minlenp, is_inf);
data_fake.last_found=newSVsv(data->last_found);
}
}
else
data_fake.last_closep = &fake;
data_fake.flags = 0;
data_fake.substrs[0].flags = 0;
data_fake.substrs[1].flags = 0;
data_fake.pos_delta = delta;
if (is_inf)
data_fake.flags |= SF_IS_INF;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* positive lookahead study_chunk() recursion */
*minnextp = study_chunk(pRExC_state, &nscan, minnextp,
&deltanext, last, &data_fake,
stopparen, recursed_depth, NULL,
f, depth+1, mutate_ok);
if (scan->flags) {
assert(0); /* This code has never been tested since this
is normally not compiled */
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| *minnextp > (I32)U8_MAX
|| *minnextp + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
if (deltanext) {
scan->next_off = deltanext;
}
scan->flags = (U8)*minnextp + deltanext;
}
*minnextp += min;
if (f & SCF_DO_STCLASS_AND) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
if ((flags & SCF_DO_SUBSTR) && data_fake.last_found) {
int i;
if (RExC_rx->minlen<*minnextp)
RExC_rx->minlen=*minnextp;
scan_commit(pRExC_state, &data_fake, minnextp, is_inf);
SvREFCNT_dec_NN(data_fake.last_found);
for (i = 0; i < 2; i++) {
if (data_fake.substrs[i].minlenp != minlenp) {
data->substrs[i].min_offset =
data_fake.substrs[i].min_offset;
data->substrs[i].max_offset =
data_fake.substrs[i].max_offset;
data->substrs[i].minlenp =
data_fake.substrs[i].minlenp;
data->substrs[i].lookbehind += scan->flags;
}
}
}
}
}
#endif
}
else if (OP(scan) == OPEN) {
if (stopparen != (I32)ARG(scan))
pars++;
}
else if (OP(scan) == CLOSE) {
if (stopparen == (I32)ARG(scan)) {
break;
}
if ((I32)ARG(scan) == is_par) {
next = regnext(scan);
if ( next && (OP(next) != WHILEM) && next < last)
is_par = 0; /* Disable optimization */
}
if (data)
*(data->last_closep) = ARG(scan);
}
else if (OP(scan) == EVAL) {
if (data)
data->flags |= SF_HAS_EVAL;
}
else if ( PL_regkind[OP(scan)] == ENDLIKE ) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
flags &= ~SCF_DO_SUBSTR;
}
if (data && OP(scan)==ACCEPT) {
data->flags |= SCF_SEEN_ACCEPT;
if (stopmin > min)
stopmin = min;
}
}
else if (OP(scan) == LOGICAL && scan->flags == 2) /* Embedded follows */
{
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
}
else if (OP(scan) == GPOS) {
if (!(RExC_rx->intflags & PREGf_GPOS_FLOAT) &&
!(delta || is_inf || (data && data->pos_delta)))
{
if (!(RExC_rx->intflags & PREGf_ANCH) && (flags & SCF_DO_SUBSTR))
RExC_rx->intflags |= PREGf_ANCH_GPOS;
if (RExC_rx->gofs < (STRLEN)min)
RExC_rx->gofs = min;
} else {
RExC_rx->intflags |= PREGf_GPOS_FLOAT;
RExC_rx->gofs = 0;
}
}
#ifdef TRIE_STUDY_OPT
#ifdef FULL_TRIE_STUDY
else if (PL_regkind[OP(scan)] == TRIE) {
/* NOTE - There is similar code to this block above for handling
BRANCH nodes on the initial study. If you change stuff here
check there too. */
regnode *trie_node= scan;
regnode *tail= regnext(scan);
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
SSize_t max1 = 0, min1 = SSize_t_MAX;
regnode_ssc accum;
if (flags & SCF_DO_SUBSTR) { /* XXXX Add !SUSPEND? */
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
if (!trie->jump) {
min1= trie->minlen;
max1= trie->maxlen;
} else {
const regnode *nextbranch= NULL;
U32 word;
for ( word=1 ; word <= trie->wordcount ; word++)
{
SSize_t deltanext=0, minnext=0, f = 0, fake;
regnode_ssc this_class;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
if (trie->jump[word]) {
if (!nextbranch)
nextbranch = trie_node + trie->jump[0];
scan= trie_node + trie->jump[word];
/* We go from the jump point to the branch that follows
it. Note this means we need the vestigal unused
branches even though they arent otherwise used. */
/* optimise study_chunk() for TRIE */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, (regnode *)nextbranch, &data_fake,
stopparen, recursed_depth, NULL, f, depth+1,
mutate_ok);
}
if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH)
nextbranch= regnext((regnode*)nextbranch);
if (min1 > (SSize_t)(minnext + trie->minlen))
min1 = minnext + trie->minlen;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < (SSize_t)(minnext + deltanext + trie->maxlen))
max1 = minnext + deltanext + trie->maxlen;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > min + min1)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass *) &this_class);
}
}
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1; /* float */
}
min += min1;
if (delta != SSize_t_MAX) {
if (SSize_t_MAX - (max1 - min1) >= delta)
delta += max1 - min1;
else
delta = SSize_t_MAX;
}
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
scan= tail;
continue;
}
#else
else if (PL_regkind[OP(scan)] == TRIE) {
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
U8*bang=NULL;
min += trie->minlen;
delta += (trie->maxlen - trie->minlen);
flags &= ~SCF_DO_STCLASS; /* xxx */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += trie->minlen;
data->pos_delta += (trie->maxlen - trie->minlen);
if (trie->maxlen != trie->minlen)
data->cur_is_floating = 1; /* float */
}
if (trie->jump) /* no more substrings -- for now /grr*/
flags &= ~SCF_DO_SUBSTR;
}
#endif /* old or new */
#endif /* TRIE_STUDY_OPT */
/* Else: zero-length, ignore. */
scan = regnext(scan);
}
finish:
if (frame) {
/* we need to unwind recursion. */
depth = depth - 1;
DEBUG_STUDYDATA("frame-end", data, depth, is_inf);
DEBUG_PEEP("fend", scan, depth, flags);
/* restore previous context */
last = frame->last_regnode;
scan = frame->next_regnode;
stopparen = frame->stopparen;
recursed_depth = frame->prev_recursed_depth;
RExC_frame_last = frame->prev_frame;
frame = frame->this_prev_frame;
goto fake_study_recurse;
}
assert(!frame);
DEBUG_STUDYDATA("pre-fin", data, depth, is_inf);
*scanp = scan;
*deltap = is_inf_internal ? SSize_t_MAX : delta;
if (flags & SCF_DO_SUBSTR && is_inf)
data->pos_delta = SSize_t_MAX - data->pos_min;
if (is_par > (I32)U8_MAX)
is_par = 0;
if (is_par && pars==1 && data) {
data->flags |= SF_IN_PAR;
data->flags &= ~SF_HAS_PAR;
}
else if (pars && data) {
data->flags |= SF_HAS_PAR;
data->flags &= ~SF_IN_PAR;
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
if (flags & SCF_TRIE_RESTUDY)
data->flags |= SCF_TRIE_RESTUDY;
DEBUG_STUDYDATA("post-fin", data, depth, is_inf);
{
SSize_t final_minlen= min < stopmin ? min : stopmin;
if (!(RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN)) {
if (final_minlen > SSize_t_MAX - delta)
RExC_maxlen = SSize_t_MAX;
else if (RExC_maxlen < final_minlen + delta)
RExC_maxlen = final_minlen + delta;
}
return final_minlen;
}
NOT_REACHED; /* NOTREACHED */
}
STATIC U32
S_add_data(RExC_state_t* const pRExC_state, const char* const s, const U32 n)
{
U32 count = RExC_rxi->data ? RExC_rxi->data->count : 0;
PERL_ARGS_ASSERT_ADD_DATA;
Renewc(RExC_rxi->data,
sizeof(*RExC_rxi->data) + sizeof(void*) * (count + n - 1),
char, struct reg_data);
if(count)
Renew(RExC_rxi->data->what, count + n, U8);
else
Newx(RExC_rxi->data->what, n, U8);
RExC_rxi->data->count = count + n;
Copy(s, RExC_rxi->data->what + count, n, U8);
return count;
}
/*XXX: todo make this not included in a non debugging perl, but appears to be
* used anyway there, in 'use re' */
#ifndef PERL_IN_XSUB_RE
void
Perl_reginitcolors(pTHX)
{
const char * const s = PerlEnv_getenv("PERL_RE_COLORS");
if (s) {
char *t = savepv(s);
int i = 0;
PL_colors[0] = t;
while (++i < 6) {
t = strchr(t, '\t');
if (t) {
*t = '\0';
PL_colors[i] = ++t;
}
else
PL_colors[i] = t = (char *)"";
}
} else {
int i = 0;
while (i < 6)
PL_colors[i++] = (char *)"";
}
PL_colorset = 1;
}
#endif
#ifdef TRIE_STUDY_OPT
#define CHECK_RESTUDY_GOTO_butfirst(dOsomething) \
STMT_START { \
if ( \
(data.flags & SCF_TRIE_RESTUDY) \
&& ! restudied++ \
) { \
dOsomething; \
goto reStudy; \
} \
} STMT_END
#else
#define CHECK_RESTUDY_GOTO_butfirst
#endif
/*
* pregcomp - compile a regular expression into internal code
*
* Decides which engine's compiler to call based on the hint currently in
* scope
*/
#ifndef PERL_IN_XSUB_RE
/* return the currently in-scope regex engine (or the default if none) */
regexp_engine const *
Perl_current_re_engine(pTHX)
{
if (IN_PERL_COMPILETIME) {
HV * const table = GvHV(PL_hintgv);
SV **ptr;
if (!table || !(PL_hints & HINT_LOCALIZE_HH))
return &PL_core_reg_engine;
ptr = hv_fetchs(table, "regcomp", FALSE);
if ( !(ptr && SvIOK(*ptr) && SvIV(*ptr)))
return &PL_core_reg_engine;
return INT2PTR(regexp_engine*, SvIV(*ptr));
}
else {
SV *ptr;
if (!PL_curcop->cop_hints_hash)
return &PL_core_reg_engine;
ptr = cop_hints_fetch_pvs(PL_curcop, "regcomp", 0);
if ( !(ptr && SvIOK(ptr) && SvIV(ptr)))
return &PL_core_reg_engine;
return INT2PTR(regexp_engine*, SvIV(ptr));
}
}
REGEXP *
Perl_pregcomp(pTHX_ SV * const pattern, const U32 flags)
{
regexp_engine const *eng = current_re_engine();
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_PREGCOMP;
/* Dispatch a request to compile a regexp to correct regexp engine. */
DEBUG_COMPILE_r({
Perl_re_printf( aTHX_ "Using engine %" UVxf "\n",
PTR2UV(eng));
});
return CALLREGCOMP_ENG(eng, pattern, flags);
}
#endif
/* public(ish) entry point for the perl core's own regex compiling code.
* It's actually a wrapper for Perl_re_op_compile that only takes an SV
* pattern rather than a list of OPs, and uses the internal engine rather
* than the current one */
REGEXP *
Perl_re_compile(pTHX_ SV * const pattern, U32 rx_flags)
{
SV *pat = pattern; /* defeat constness! */
PERL_ARGS_ASSERT_RE_COMPILE;
return Perl_re_op_compile(aTHX_ &pat, 1, NULL,
#ifdef PERL_IN_XSUB_RE
&my_reg_engine,
#else
&PL_core_reg_engine,
#endif
NULL, NULL, rx_flags, 0);
}
static void
S_free_codeblocks(pTHX_ struct reg_code_blocks *cbs)
{
int n;
if (--cbs->refcnt > 0)
return;
for (n = 0; n < cbs->count; n++) {
REGEXP *rx = cbs->cb[n].src_regex;
if (rx) {
cbs->cb[n].src_regex = NULL;
SvREFCNT_dec_NN(rx);
}
}
Safefree(cbs->cb);
Safefree(cbs);
}
static struct reg_code_blocks *
S_alloc_code_blocks(pTHX_ int ncode)
{
struct reg_code_blocks *cbs;
Newx(cbs, 1, struct reg_code_blocks);
cbs->count = ncode;
cbs->refcnt = 1;
SAVEDESTRUCTOR_X(S_free_codeblocks, cbs);
if (ncode)
Newx(cbs->cb, ncode, struct reg_code_block);
else
cbs->cb = NULL;
return cbs;
}
/* upgrade pattern pat_p of length plen_p to UTF8, and if there are code
* blocks, recalculate the indices. Update pat_p and plen_p in-place to
* point to the realloced string and length.
*
* This is essentially a copy of Perl_bytes_to_utf8() with the code index
* stuff added */
static void
S_pat_upgrade_to_utf8(pTHX_ RExC_state_t * const pRExC_state,
char **pat_p, STRLEN *plen_p, int num_code_blocks)
{
U8 *const src = (U8*)*pat_p;
U8 *dst, *d;
int n=0;
STRLEN s = 0;
bool do_end = 0;
GET_RE_DEBUG_FLAGS_DECL;
DEBUG_PARSE_r(Perl_re_printf( aTHX_
"UTF8 mismatch! Converting to utf8 for resizing and compile\n"));
/* 1 for each byte + 1 for each byte that expands to two, + trailing NUL */
Newx(dst, *plen_p + variant_under_utf8_count(src, src + *plen_p) + 1, U8);
d = dst;
while (s < *plen_p) {
append_utf8_from_native_byte(src[s], &d);
if (n < num_code_blocks) {
assert(pRExC_state->code_blocks);
if (!do_end && pRExC_state->code_blocks->cb[n].start == s) {
pRExC_state->code_blocks->cb[n].start = d - dst - 1;
assert(*(d - 1) == '(');
do_end = 1;
}
else if (do_end && pRExC_state->code_blocks->cb[n].end == s) {
pRExC_state->code_blocks->cb[n].end = d - dst - 1;
assert(*(d - 1) == ')');
do_end = 0;
n++;
}
}
s++;
}
*d = '\0';
*plen_p = d - dst;
*pat_p = (char*) dst;
SAVEFREEPV(*pat_p);
RExC_orig_utf8 = RExC_utf8 = 1;
}
/* S_concat_pat(): concatenate a list of args to the pattern string pat,
* while recording any code block indices, and handling overloading,
* nested qr// objects etc. If pat is null, it will allocate a new
* string, or just return the first arg, if there's only one.
*
* Returns the malloced/updated pat.
* patternp and pat_count is the array of SVs to be concatted;
* oplist is the optional list of ops that generated the SVs;
* recompile_p is a pointer to a boolean that will be set if
* the regex will need to be recompiled.
* delim, if non-null is an SV that will be inserted between each element
*/
static SV*
S_concat_pat(pTHX_ RExC_state_t * const pRExC_state,
SV *pat, SV ** const patternp, int pat_count,
OP *oplist, bool *recompile_p, SV *delim)
{
SV **svp;
int n = 0;
bool use_delim = FALSE;
bool alloced = FALSE;
/* if we know we have at least two args, create an empty string,
* then concatenate args to that. For no args, return an empty string */
if (!pat && pat_count != 1) {
pat = newSVpvs("");
SAVEFREESV(pat);
alloced = TRUE;
}
for (svp = patternp; svp < patternp + pat_count; svp++) {
SV *sv;
SV *rx = NULL;
STRLEN orig_patlen = 0;
bool code = 0;
SV *msv = use_delim ? delim : *svp;
if (!msv) msv = &PL_sv_undef;
/* if we've got a delimiter, we go round the loop twice for each
* svp slot (except the last), using the delimiter the second
* time round */
if (use_delim) {
svp--;
use_delim = FALSE;
}
else if (delim)
use_delim = TRUE;
if (SvTYPE(msv) == SVt_PVAV) {
/* we've encountered an interpolated array within
* the pattern, e.g. /...@a..../. Expand the list of elements,
* then recursively append elements.
* The code in this block is based on S_pushav() */
AV *const av = (AV*)msv;
const SSize_t maxarg = AvFILL(av) + 1;
SV **array;
if (oplist) {
assert(oplist->op_type == OP_PADAV
|| oplist->op_type == OP_RV2AV);
oplist = OpSIBLING(oplist);
}
if (SvRMAGICAL(av)) {
SSize_t i;
Newx(array, maxarg, SV*);
SAVEFREEPV(array);
for (i=0; i < maxarg; i++) {
SV ** const svp = av_fetch(av, i, FALSE);
array[i] = svp ? *svp : &PL_sv_undef;
}
}
else
array = AvARRAY(av);
pat = S_concat_pat(aTHX_ pRExC_state, pat,
array, maxarg, NULL, recompile_p,
/* $" */
GvSV((gv_fetchpvs("\"", GV_ADDMULTI, SVt_PV))));
continue;
}
/* we make the assumption here that each op in the list of
* op_siblings maps to one SV pushed onto the stack,
* except for code blocks, with have both an OP_NULL and
* and OP_CONST.
* This allows us to match up the list of SVs against the
* list of OPs to find the next code block.
*
* Note that PUSHMARK PADSV PADSV ..
* is optimised to
* PADRANGE PADSV PADSV ..
* so the alignment still works. */
if (oplist) {
if (oplist->op_type == OP_NULL
&& (oplist->op_flags & OPf_SPECIAL))
{
assert(n < pRExC_state->code_blocks->count);
pRExC_state->code_blocks->cb[n].start = pat ? SvCUR(pat) : 0;
pRExC_state->code_blocks->cb[n].block = oplist;
pRExC_state->code_blocks->cb[n].src_regex = NULL;
n++;
code = 1;
oplist = OpSIBLING(oplist); /* skip CONST */
assert(oplist);
}
oplist = OpSIBLING(oplist);;
}
/* apply magic and QR overloading to arg */
SvGETMAGIC(msv);
if (SvROK(msv) && SvAMAGIC(msv)) {
SV *sv = AMG_CALLunary(msv, regexp_amg);
if (sv) {
if (SvROK(sv))
sv = SvRV(sv);
if (SvTYPE(sv) != SVt_REGEXP)
Perl_croak(aTHX_ "Overloaded qr did not return a REGEXP");
msv = sv;
}
}
/* try concatenation overload ... */
if (pat && (SvAMAGIC(pat) || SvAMAGIC(msv)) &&
(sv = amagic_call(pat, msv, concat_amg, AMGf_assign)))
{
sv_setsv(pat, sv);
/* overloading involved: all bets are off over literal
* code. Pretend we haven't seen it */
if (n)
pRExC_state->code_blocks->count -= n;
n = 0;
}
else {
/* ... or failing that, try "" overload */
while (SvAMAGIC(msv)
&& (sv = AMG_CALLunary(msv, string_amg))
&& sv != msv
&& !( SvROK(msv)
&& SvROK(sv)
&& SvRV(msv) == SvRV(sv))
) {
msv = sv;
SvGETMAGIC(msv);
}
if (SvROK(msv) && SvTYPE(SvRV(msv)) == SVt_REGEXP)
msv = SvRV(msv);
if (pat) {
/* this is a partially unrolled
* sv_catsv_nomg(pat, msv);
* that allows us to adjust code block indices if
* needed */
STRLEN dlen;
char *dst = SvPV_force_nomg(pat, dlen);
orig_patlen = dlen;
if (SvUTF8(msv) && !SvUTF8(pat)) {
S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &dst, &dlen, n);
sv_setpvn(pat, dst, dlen);
SvUTF8_on(pat);
}
sv_catsv_nomg(pat, msv);
rx = msv;
}
else {
/* We have only one SV to process, but we need to verify
* it is properly null terminated or we will fail asserts
* later. In theory we probably shouldn't get such SV's,
* but if we do we should handle it gracefully. */
if ( SvTYPE(msv) != SVt_PV || (SvLEN(msv) > SvCUR(msv) && *(SvEND(msv)) == 0) || SvIsCOW_shared_hash(msv) ) {
/* not a string, or a string with a trailing null */
pat = msv;
} else {
/* a string with no trailing null, we need to copy it
* so it has a trailing null */
pat = sv_2mortal(newSVsv(msv));
}
}
if (code)
pRExC_state->code_blocks->cb[n-1].end = SvCUR(pat)-1;
}
/* extract any code blocks within any embedded qr//'s */
if (rx && SvTYPE(rx) == SVt_REGEXP
&& RX_ENGINE((REGEXP*)rx)->op_comp)
{
RXi_GET_DECL(ReANY((REGEXP *)rx), ri);
if (ri->code_blocks && ri->code_blocks->count) {
int i;
/* the presence of an embedded qr// with code means
* we should always recompile: the text of the
* qr// may not have changed, but it may be a
* different closure than last time */
*recompile_p = 1;
if (pRExC_state->code_blocks) {
int new_count = pRExC_state->code_blocks->count
+ ri->code_blocks->count;
Renew(pRExC_state->code_blocks->cb,
new_count, struct reg_code_block);
pRExC_state->code_blocks->count = new_count;
}
else
pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_
ri->code_blocks->count);
for (i=0; i < ri->code_blocks->count; i++) {
struct reg_code_block *src, *dst;
STRLEN offset = orig_patlen
+ ReANY((REGEXP *)rx)->pre_prefix;
assert(n < pRExC_state->code_blocks->count);
src = &ri->code_blocks->cb[i];
dst = &pRExC_state->code_blocks->cb[n];
dst->start = src->start + offset;
dst->end = src->end + offset;
dst->block = src->block;
dst->src_regex = (REGEXP*) SvREFCNT_inc( (SV*)
src->src_regex
? src->src_regex
: (REGEXP*)rx);
n++;
}
}
}
}
/* avoid calling magic multiple times on a single element e.g. =~ $qr */
if (alloced)
SvSETMAGIC(pat);
return pat;
}
/* see if there are any run-time code blocks in the pattern.
* False positives are allowed */
static bool
S_has_runtime_code(pTHX_ RExC_state_t * const pRExC_state,
char *pat, STRLEN plen)
{
int n = 0;
STRLEN s;
PERL_UNUSED_CONTEXT;
for (s = 0; s < plen; s++) {
if ( pRExC_state->code_blocks
&& n < pRExC_state->code_blocks->count
&& s == pRExC_state->code_blocks->cb[n].start)
{
s = pRExC_state->code_blocks->cb[n].end;
n++;
continue;
}
/* TODO ideally should handle [..], (#..), /#.../x to reduce false
* positives here */
if (pat[s] == '(' && s+2 <= plen && pat[s+1] == '?' &&
(pat[s+2] == '{'
|| (s + 2 <= plen && pat[s+2] == '?' && pat[s+3] == '{'))
)
return 1;
}
return 0;
}
/* Handle run-time code blocks. We will already have compiled any direct
* or indirect literal code blocks. Now, take the pattern 'pat' and make a
* copy of it, but with any literal code blocks blanked out and
* appropriate chars escaped; then feed it into
*
* eval "qr'modified_pattern'"
*
* For example,
*
* a\bc(?{"this was literal"})def'ghi\\jkl(?{"this is runtime"})mno
*
* becomes
*
* qr'a\\bc_______________________def\'ghi\\\\jkl(?{"this is runtime"})mno'
*
* After eval_sv()-ing that, grab any new code blocks from the returned qr
* and merge them with any code blocks of the original regexp.
*
* If the pat is non-UTF8, while the evalled qr is UTF8, don't merge;
* instead, just save the qr and return FALSE; this tells our caller that
* the original pattern needs upgrading to utf8.
*/
static bool
S_compile_runtime_code(pTHX_ RExC_state_t * const pRExC_state,
char *pat, STRLEN plen)
{
SV *qr;
GET_RE_DEBUG_FLAGS_DECL;
if (pRExC_state->runtime_code_qr) {
/* this is the second time we've been called; this should
* only happen if the main pattern got upgraded to utf8
* during compilation; re-use the qr we compiled first time
* round (which should be utf8 too)
*/
qr = pRExC_state->runtime_code_qr;
pRExC_state->runtime_code_qr = NULL;
assert(RExC_utf8 && SvUTF8(qr));
}
else {
int n = 0;
STRLEN s;
char *p, *newpat;
int newlen = plen + 7; /* allow for "qr''xx\0" extra chars */
SV *sv, *qr_ref;
dSP;
/* determine how many extra chars we need for ' and \ escaping */
for (s = 0; s < plen; s++) {
if (pat[s] == '\'' || pat[s] == '\\')
newlen++;
}
Newx(newpat, newlen, char);
p = newpat;
*p++ = 'q'; *p++ = 'r'; *p++ = '\'';
for (s = 0; s < plen; s++) {
if ( pRExC_state->code_blocks
&& n < pRExC_state->code_blocks->count
&& s == pRExC_state->code_blocks->cb[n].start)
{
/* blank out literal code block so that they aren't
* recompiled: eg change from/to:
* /(?{xyz})/
* /(?=====)/
* and
* /(??{xyz})/
* /(?======)/
* and
* /(?(?{xyz}))/
* /(?(?=====))/
*/
assert(pat[s] == '(');
assert(pat[s+1] == '?');
*p++ = '(';
*p++ = '?';
s += 2;
while (s < pRExC_state->code_blocks->cb[n].end) {
*p++ = '=';
s++;
}
*p++ = ')';
n++;
continue;
}
if (pat[s] == '\'' || pat[s] == '\\')
*p++ = '\\';
*p++ = pat[s];
}
*p++ = '\'';
if (pRExC_state->pm_flags & RXf_PMf_EXTENDED) {
*p++ = 'x';
if (pRExC_state->pm_flags & RXf_PMf_EXTENDED_MORE) {
*p++ = 'x';
}
}
*p++ = '\0';
DEBUG_COMPILE_r({
Perl_re_printf( aTHX_
"%sre-parsing pattern for runtime code:%s %s\n",
PL_colors[4], PL_colors[5], newpat);
});
sv = newSVpvn_flags(newpat, p-newpat-1, RExC_utf8 ? SVf_UTF8 : 0);
Safefree(newpat);
ENTER;
SAVETMPS;
save_re_context();
PUSHSTACKi(PERLSI_REQUIRE);
/* G_RE_REPARSING causes the toker to collapse \\ into \ when
* parsing qr''; normally only q'' does this. It also alters
* hints handling */
eval_sv(sv, G_SCALAR|G_RE_REPARSING);
SvREFCNT_dec_NN(sv);
SPAGAIN;
qr_ref = POPs;
PUTBACK;
{
SV * const errsv = ERRSV;
if (SvTRUE_NN(errsv))
/* use croak_sv ? */
Perl_croak_nocontext("%" SVf, SVfARG(errsv));
}
assert(SvROK(qr_ref));
qr = SvRV(qr_ref);
assert(SvTYPE(qr) == SVt_REGEXP && RX_ENGINE((REGEXP*)qr)->op_comp);
/* the leaving below frees the tmp qr_ref.
* Give qr a life of its own */
SvREFCNT_inc(qr);
POPSTACK;
FREETMPS;
LEAVE;
}
if (!RExC_utf8 && SvUTF8(qr)) {
/* first time through; the pattern got upgraded; save the
* qr for the next time through */
assert(!pRExC_state->runtime_code_qr);
pRExC_state->runtime_code_qr = qr;
return 0;
}
/* extract any code blocks within the returned qr// */
/* merge the main (r1) and run-time (r2) code blocks into one */
{
RXi_GET_DECL(ReANY((REGEXP *)qr), r2);
struct reg_code_block *new_block, *dst;
RExC_state_t * const r1 = pRExC_state; /* convenient alias */
int i1 = 0, i2 = 0;
int r1c, r2c;
if (!r2->code_blocks || !r2->code_blocks->count) /* we guessed wrong */
{
SvREFCNT_dec_NN(qr);
return 1;
}
if (!r1->code_blocks)
r1->code_blocks = S_alloc_code_blocks(aTHX_ 0);
r1c = r1->code_blocks->count;
r2c = r2->code_blocks->count;
Newx(new_block, r1c + r2c, struct reg_code_block);
dst = new_block;
while (i1 < r1c || i2 < r2c) {
struct reg_code_block *src;
bool is_qr = 0;
if (i1 == r1c) {
src = &r2->code_blocks->cb[i2++];
is_qr = 1;
}
else if (i2 == r2c)
src = &r1->code_blocks->cb[i1++];
else if ( r1->code_blocks->cb[i1].start
< r2->code_blocks->cb[i2].start)
{
src = &r1->code_blocks->cb[i1++];
assert(src->end < r2->code_blocks->cb[i2].start);
}
else {
assert( r1->code_blocks->cb[i1].start
> r2->code_blocks->cb[i2].start);
src = &r2->code_blocks->cb[i2++];
is_qr = 1;
assert(src->end < r1->code_blocks->cb[i1].start);
}
assert(pat[src->start] == '(');
assert(pat[src->end] == ')');
dst->start = src->start;
dst->end = src->end;
dst->block = src->block;
dst->src_regex = is_qr ? (REGEXP*) SvREFCNT_inc( (SV*) qr)
: src->src_regex;
dst++;
}
r1->code_blocks->count += r2c;
Safefree(r1->code_blocks->cb);
r1->code_blocks->cb = new_block;
}
SvREFCNT_dec_NN(qr);
return 1;
}
STATIC bool
S_setup_longest(pTHX_ RExC_state_t *pRExC_state,
struct reg_substr_datum *rsd,
struct scan_data_substrs *sub,
STRLEN longest_length)
{
/* This is the common code for setting up the floating and fixed length
* string data extracted from Perl_re_op_compile() below. Returns a boolean
* as to whether succeeded or not */
I32 t;
SSize_t ml;
bool eol = cBOOL(sub->flags & SF_BEFORE_EOL);
bool meol = cBOOL(sub->flags & SF_BEFORE_MEOL);
if (! (longest_length
|| (eol /* Can't have SEOL and MULTI */
&& (! meol || (RExC_flags & RXf_PMf_MULTILINE)))
)
/* See comments for join_exact for why REG_UNFOLDED_MULTI_SEEN */
|| (RExC_seen & REG_UNFOLDED_MULTI_SEEN))
{
return FALSE;
}
/* copy the information about the longest from the reg_scan_data
over to the program. */
if (SvUTF8(sub->str)) {
rsd->substr = NULL;
rsd->utf8_substr = sub->str;
} else {
rsd->substr = sub->str;
rsd->utf8_substr = NULL;
}
/* end_shift is how many chars that must be matched that
follow this item. We calculate it ahead of time as once the
lookbehind offset is added in we lose the ability to correctly
calculate it.*/
ml = sub->minlenp ? *(sub->minlenp) : (SSize_t)longest_length;
rsd->end_shift = ml - sub->min_offset
- longest_length
/* XXX SvTAIL is always false here - did you mean FBMcf_TAIL
* intead? - DAPM
+ (SvTAIL(sub->str) != 0)
*/
+ sub->lookbehind;
t = (eol/* Can't have SEOL and MULTI */
&& (! meol || (RExC_flags & RXf_PMf_MULTILINE)));
fbm_compile(sub->str, t ? FBMcf_TAIL : 0);
return TRUE;
}
STATIC void
S_set_regex_pv(pTHX_ RExC_state_t *pRExC_state, REGEXP *Rx)
{
/* Calculates and sets in the compiled pattern 'Rx' the string to compile,
* properly wrapped with the right modifiers */
bool has_p = ((RExC_rx->extflags & RXf_PMf_KEEPCOPY) == RXf_PMf_KEEPCOPY);
bool has_charset = RExC_utf8 || (get_regex_charset(RExC_rx->extflags)
!= REGEX_DEPENDS_CHARSET);
/* The caret is output if there are any defaults: if not all the STD
* flags are set, or if no character set specifier is needed */
bool has_default =
(((RExC_rx->extflags & RXf_PMf_STD_PMMOD) != RXf_PMf_STD_PMMOD)
|| ! has_charset);
bool has_runon = ((RExC_seen & REG_RUN_ON_COMMENT_SEEN)
== REG_RUN_ON_COMMENT_SEEN);
U8 reganch = (U8)((RExC_rx->extflags & RXf_PMf_STD_PMMOD)
>> RXf_PMf_STD_PMMOD_SHIFT);
const char *fptr = STD_PAT_MODS; /*"msixxn"*/
char *p;
STRLEN pat_len = RExC_precomp_end - RExC_precomp;
/* We output all the necessary flags; we never output a minus, as all
* those are defaults, so are
* covered by the caret */
const STRLEN wraplen = pat_len + has_p + has_runon
+ has_default /* If needs a caret */
+ PL_bitcount[reganch] /* 1 char for each set standard flag */
/* If needs a character set specifier */
+ ((has_charset) ? MAX_CHARSET_NAME_LENGTH : 0)
+ (sizeof("(?:)") - 1);
PERL_ARGS_ASSERT_SET_REGEX_PV;
/* make sure PL_bitcount bounds not exceeded */
assert(sizeof(STD_PAT_MODS) <= 8);
p = sv_grow(MUTABLE_SV(Rx), wraplen + 1); /* +1 for the ending NUL */
SvPOK_on(Rx);
if (RExC_utf8)
SvFLAGS(Rx) |= SVf_UTF8;
*p++='('; *p++='?';
/* If a default, cover it using the caret */
if (has_default) {
*p++= DEFAULT_PAT_MOD;
}
if (has_charset) {
STRLEN len;
const char* name;
name = get_regex_charset_name(RExC_rx->extflags, &len);
if strEQ(name, DEPENDS_PAT_MODS) { /* /d under UTF-8 => /u */
assert(RExC_utf8);
name = UNICODE_PAT_MODS;
len = sizeof(UNICODE_PAT_MODS) - 1;
}
Copy(name, p, len, char);
p += len;
}
if (has_p)
*p++ = KEEPCOPY_PAT_MOD; /*'p'*/
{
char ch;
while((ch = *fptr++)) {
if(reganch & 1)
*p++ = ch;
reganch >>= 1;
}
}
*p++ = ':';
Copy(RExC_precomp, p, pat_len, char);
assert ((RX_WRAPPED(Rx) - p) < 16);
RExC_rx->pre_prefix = p - RX_WRAPPED(Rx);
p += pat_len;
/* Adding a trailing \n causes this to compile properly:
my $R = qr / A B C # D E/x; /($R)/
Otherwise the parens are considered part of the comment */
if (has_runon)
*p++ = '\n';
*p++ = ')';
*p = 0;
SvCUR_set(Rx, p - RX_WRAPPED(Rx));
}
/*
* Perl_re_op_compile - the perl internal RE engine's function to compile a
* regular expression into internal code.
* The pattern may be passed either as:
* a list of SVs (patternp plus pat_count)
* a list of OPs (expr)
* If both are passed, the SV list is used, but the OP list indicates
* which SVs are actually pre-compiled code blocks
*
* The SVs in the list have magic and qr overloading applied to them (and
* the list may be modified in-place with replacement SVs in the latter
* case).
*
* If the pattern hasn't changed from old_re, then old_re will be
* returned.
*
* eng is the current engine. If that engine has an op_comp method, then
* handle directly (i.e. we assume that op_comp was us); otherwise, just
* do the initial concatenation of arguments and pass on to the external
* engine.
*
* If is_bare_re is not null, set it to a boolean indicating whether the
* arg list reduced (after overloading) to a single bare regex which has
* been returned (i.e. /$qr/).
*
* orig_rx_flags contains RXf_* flags. See perlreapi.pod for more details.
*
* pm_flags contains the PMf_* flags, typically based on those from the
* pm_flags field of the related PMOP. Currently we're only interested in
* PMf_HAS_CV, PMf_IS_QR, PMf_USE_RE_EVAL.
*
* For many years this code had an initial sizing pass that calculated
* (sometimes incorrectly, leading to security holes) the size needed for the
* compiled pattern. That was changed by commit
* 7c932d07cab18751bfc7515b4320436273a459e2 in 5.29, which reallocs the size, a
* node at a time, as parsing goes along. Patches welcome to fix any obsolete
* references to this sizing pass.
*
* Now, an initial crude guess as to the size needed is made, based on the
* length of the pattern. Patches welcome to improve that guess. That amount
* of space is malloc'd and then immediately freed, and then clawed back node
* by node. This design is to minimze, to the extent possible, memory churn
* when doing the the reallocs.
*
* A separate parentheses counting pass may be needed in some cases.
* (Previously the sizing pass did this.) Patches welcome to reduce the number
* of these cases.
*
* The existence of a sizing pass necessitated design decisions that are no
* longer needed. There are potential areas of simplification.
*
* Beware that the optimization-preparation code in here knows about some
* of the structure of the compiled regexp. [I'll say.]
*/
REGEXP *
Perl_re_op_compile(pTHX_ SV ** const patternp, int pat_count,
OP *expr, const regexp_engine* eng, REGEXP *old_re,
bool *is_bare_re, const U32 orig_rx_flags, const U32 pm_flags)
{
dVAR;
REGEXP *Rx; /* Capital 'R' means points to a REGEXP */
STRLEN plen;
char *exp;
regnode *scan;
I32 flags;
SSize_t minlen = 0;
U32 rx_flags;
SV *pat;
SV** new_patternp = patternp;
/* these are all flags - maybe they should be turned
* into a single int with different bit masks */
I32 sawlookahead = 0;
I32 sawplus = 0;
I32 sawopen = 0;
I32 sawminmod = 0;
regex_charset initial_charset = get_regex_charset(orig_rx_flags);
bool recompile = 0;
bool runtime_code = 0;
scan_data_t data;
RExC_state_t RExC_state;
RExC_state_t * const pRExC_state = &RExC_state;
#ifdef TRIE_STUDY_OPT
int restudied = 0;
RExC_state_t copyRExC_state;
#endif
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_RE_OP_COMPILE;
DEBUG_r(if (!PL_colorset) reginitcolors());
/* Initialize these here instead of as-needed, as is quick and avoids
* having to test them each time otherwise */
if (! PL_InBitmap) {
#ifdef DEBUGGING
char * dump_len_string;
#endif
/* This is calculated here, because the Perl program that generates the
* static global ones doesn't currently have access to
* NUM_ANYOF_CODE_POINTS */
PL_InBitmap = _new_invlist(2);
PL_InBitmap = _add_range_to_invlist(PL_InBitmap, 0,
NUM_ANYOF_CODE_POINTS - 1);
#ifdef DEBUGGING
dump_len_string = PerlEnv_getenv("PERL_DUMP_RE_MAX_LEN");
if ( ! dump_len_string
|| ! grok_atoUV(dump_len_string, (UV *)&PL_dump_re_max_len, NULL))
{
PL_dump_re_max_len = 60; /* A reasonable default */
}
#endif
}
pRExC_state->warn_text = NULL;
pRExC_state->unlexed_names = NULL;
pRExC_state->code_blocks = NULL;
if (is_bare_re)
*is_bare_re = FALSE;
if (expr && (expr->op_type == OP_LIST ||
(expr->op_type == OP_NULL && expr->op_targ == OP_LIST))) {
/* allocate code_blocks if needed */
OP *o;
int ncode = 0;
for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o))
if (o->op_type == OP_NULL && (o->op_flags & OPf_SPECIAL))
ncode++; /* count of DO blocks */
if (ncode)
pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_ ncode);
}
if (!pat_count) {
/* compile-time pattern with just OP_CONSTs and DO blocks */
int n;
OP *o;
/* find how many CONSTs there are */
assert(expr);
n = 0;
if (expr->op_type == OP_CONST)
n = 1;
else
for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) {
if (o->op_type == OP_CONST)
n++;
}
/* fake up an SV array */
assert(!new_patternp);
Newx(new_patternp, n, SV*);
SAVEFREEPV(new_patternp);
pat_count = n;
n = 0;
if (expr->op_type == OP_CONST)
new_patternp[n] = cSVOPx_sv(expr);
else
for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) {
if (o->op_type == OP_CONST)
new_patternp[n++] = cSVOPo_sv;
}
}
DEBUG_PARSE_r(Perl_re_printf( aTHX_
"Assembling pattern from %d elements%s\n", pat_count,
orig_rx_flags & RXf_SPLIT ? " for split" : ""));
/* set expr to the first arg op */
if (pRExC_state->code_blocks && pRExC_state->code_blocks->count
&& expr->op_type != OP_CONST)
{
expr = cLISTOPx(expr)->op_first;
assert( expr->op_type == OP_PUSHMARK
|| (expr->op_type == OP_NULL && expr->op_targ == OP_PUSHMARK)
|| expr->op_type == OP_PADRANGE);
expr = OpSIBLING(expr);
}
pat = S_concat_pat(aTHX_ pRExC_state, NULL, new_patternp, pat_count,
expr, &recompile, NULL);
/* handle bare (possibly after overloading) regex: foo =~ $re */
{
SV *re = pat;
if (SvROK(re))
re = SvRV(re);
if (SvTYPE(re) == SVt_REGEXP) {
if (is_bare_re)
*is_bare_re = TRUE;
SvREFCNT_inc(re);
DEBUG_PARSE_r(Perl_re_printf( aTHX_
"Precompiled pattern%s\n",
orig_rx_flags & RXf_SPLIT ? " for split" : ""));
return (REGEXP*)re;
}
}
exp = SvPV_nomg(pat, plen);
if (!eng->op_comp) {
if ((SvUTF8(pat) && IN_BYTES)
|| SvGMAGICAL(pat) || SvAMAGIC(pat))
{
/* make a temporary copy; either to convert to bytes,
* or to avoid repeating get-magic / overloaded stringify */
pat = newSVpvn_flags(exp, plen, SVs_TEMP |
(IN_BYTES ? 0 : SvUTF8(pat)));
}
return CALLREGCOMP_ENG(eng, pat, orig_rx_flags);
}
/* ignore the utf8ness if the pattern is 0 length */
RExC_utf8 = RExC_orig_utf8 = (plen == 0 || IN_BYTES) ? 0 : SvUTF8(pat);
RExC_uni_semantics = 0;
RExC_contains_locale = 0;
RExC_strict = cBOOL(pm_flags & RXf_PMf_STRICT);
RExC_in_script_run = 0;
RExC_study_started = 0;
pRExC_state->runtime_code_qr = NULL;
RExC_frame_head= NULL;
RExC_frame_last= NULL;
RExC_frame_count= 0;
RExC_latest_warn_offset = 0;
RExC_use_BRANCHJ = 0;
RExC_total_parens = 0;
RExC_open_parens = NULL;
RExC_close_parens = NULL;
RExC_paren_names = NULL;
RExC_size = 0;
RExC_seen_d_op = FALSE;
#ifdef DEBUGGING
RExC_paren_name_list = NULL;
#endif
DEBUG_r({
RExC_mysv1= sv_newmortal();
RExC_mysv2= sv_newmortal();
});
DEBUG_COMPILE_r({
SV *dsv= sv_newmortal();
RE_PV_QUOTED_DECL(s, RExC_utf8, dsv, exp, plen, PL_dump_re_max_len);
Perl_re_printf( aTHX_ "%sCompiling REx%s %s\n",
PL_colors[4], PL_colors[5], s);
});
/* we jump here if we have to recompile, e.g., from upgrading the pattern
* to utf8 */
if ((pm_flags & PMf_USE_RE_EVAL)
/* this second condition covers the non-regex literal case,
* i.e. $foo =~ '(?{})'. */
|| (IN_PERL_COMPILETIME && (PL_hints & HINT_RE_EVAL))
)
runtime_code = S_has_runtime_code(aTHX_ pRExC_state, exp, plen);
redo_parse:
/* return old regex if pattern hasn't changed */
/* XXX: note in the below we have to check the flags as well as the
* pattern.
*
* Things get a touch tricky as we have to compare the utf8 flag
* independently from the compile flags. */
if ( old_re
&& !recompile
&& !!RX_UTF8(old_re) == !!RExC_utf8
&& ( RX_COMPFLAGS(old_re) == ( orig_rx_flags & RXf_PMf_FLAGCOPYMASK ) )
&& RX_PRECOMP(old_re)
&& RX_PRELEN(old_re) == plen
&& memEQ(RX_PRECOMP(old_re), exp, plen)
&& !runtime_code /* with runtime code, always recompile */ )
{
return old_re;
}
/* Allocate the pattern's SV */
RExC_rx_sv = Rx = (REGEXP*) newSV_type(SVt_REGEXP);
RExC_rx = ReANY(Rx);
if ( RExC_rx == NULL )
FAIL("Regexp out of space");
rx_flags = orig_rx_flags;
if ( (UTF || RExC_uni_semantics)
&& initial_charset == REGEX_DEPENDS_CHARSET)
{
/* Set to use unicode semantics if the pattern is in utf8 and has the
* 'depends' charset specified, as it means unicode when utf8 */
set_regex_charset(&rx_flags, REGEX_UNICODE_CHARSET);
RExC_uni_semantics = 1;
}
RExC_pm_flags = pm_flags;
if (runtime_code) {
assert(TAINTING_get || !TAINT_get);
if (TAINT_get)
Perl_croak(aTHX_ "Eval-group in insecure regular expression");
if (!S_compile_runtime_code(aTHX_ pRExC_state, exp, plen)) {
/* whoops, we have a non-utf8 pattern, whilst run-time code
* got compiled as utf8. Try again with a utf8 pattern */
S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen,
pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0);
goto redo_parse;
}
}
assert(!pRExC_state->runtime_code_qr);
RExC_sawback = 0;
RExC_seen = 0;
RExC_maxlen = 0;
RExC_in_lookbehind = 0;
RExC_seen_zerolen = *exp == '^' ? -1 : 0;
#ifdef EBCDIC
RExC_recode_x_to_native = 0;
#endif
RExC_in_multi_char_class = 0;
RExC_start = RExC_copy_start_in_constructed = RExC_copy_start_in_input = RExC_precomp = exp;
RExC_precomp_end = RExC_end = exp + plen;
RExC_nestroot = 0;
RExC_whilem_seen = 0;
RExC_end_op = NULL;
RExC_recurse = NULL;
RExC_study_chunk_recursed = NULL;
RExC_study_chunk_recursed_bytes= 0;
RExC_recurse_count = 0;
pRExC_state->code_index = 0;
/* Initialize the string in the compiled pattern. This is so that there is
* something to output if necessary */
set_regex_pv(pRExC_state, Rx);
DEBUG_PARSE_r({
Perl_re_printf( aTHX_
"Starting parse and generation\n");
RExC_lastnum=0;
RExC_lastparse=NULL;
});
/* Allocate space and zero-initialize. Note, the two step process
of zeroing when in debug mode, thus anything assigned has to
happen after that */
if (! RExC_size) {
/* On the first pass of the parse, we guess how big this will be. Then
* we grow in one operation to that amount and then give it back. As
* we go along, we re-allocate what we need.
*
* XXX Currently the guess is essentially that the pattern will be an
* EXACT node with one byte input, one byte output. This is crude, and
* better heuristics are welcome.
*
* On any subsequent passes, we guess what we actually computed in the
* latest earlier pass. Such a pass probably didn't complete so is
* missing stuff. We could improve those guesses by knowing where the
* parse stopped, and use the length so far plus apply the above
* assumption to what's left. */
RExC_size = STR_SZ(RExC_end - RExC_start);
}
Newxc(RExC_rxi, sizeof(regexp_internal) + RExC_size, char, regexp_internal);
if ( RExC_rxi == NULL )
FAIL("Regexp out of space");
Zero(RExC_rxi, sizeof(regexp_internal) + RExC_size, char);
RXi_SET( RExC_rx, RExC_rxi );
/* We start from 0 (over from 0 in the case this is a reparse. The first
* node parsed will give back any excess memory we have allocated so far).
* */
RExC_size = 0;
/* non-zero initialization begins here */
RExC_rx->engine= eng;
RExC_rx->extflags = rx_flags;
RXp_COMPFLAGS(RExC_rx) = orig_rx_flags & RXf_PMf_FLAGCOPYMASK;
if (pm_flags & PMf_IS_QR) {
RExC_rxi->code_blocks = pRExC_state->code_blocks;
if (RExC_rxi->code_blocks) {
RExC_rxi->code_blocks->refcnt++;
}
}
RExC_rx->intflags = 0;
RExC_flags = rx_flags; /* don't let top level (?i) bleed */
RExC_parse = exp;
/* This NUL is guaranteed because the pattern comes from an SV*, and the sv
* code makes sure the final byte is an uncounted NUL. But should this
* ever not be the case, lots of things could read beyond the end of the
* buffer: loops like
* while(isFOO(*RExC_parse)) RExC_parse++;
* strchr(RExC_parse, "foo");
* etc. So it is worth noting. */
assert(*RExC_end == '\0');
RExC_naughty = 0;
RExC_npar = 1;
RExC_parens_buf_size = 0;
RExC_emit_start = RExC_rxi->program;
pRExC_state->code_index = 0;
*((char*) RExC_emit_start) = (char) REG_MAGIC;
RExC_emit = 1;
/* Do the parse */
if (reg(pRExC_state, 0, &flags, 1)) {
/* Success!, But we may need to redo the parse knowing how many parens
* there actually are */
if (IN_PARENS_PASS) {
flags |= RESTART_PARSE;
}
/* We have that number in RExC_npar */
RExC_total_parens = RExC_npar;
/* XXX For backporting, use long jumps if there is any possibility of
* overflow */
if (RExC_size > U16_MAX && ! RExC_use_BRANCHJ) {
RExC_use_BRANCHJ = TRUE;
flags |= RESTART_PARSE;
}
}
else if (! MUST_RESTART(flags)) {
ReREFCNT_dec(Rx);
Perl_croak(aTHX_ "panic: reg returned failure to re_op_compile, flags=%#" UVxf, (UV) flags);
}
/* Here, we either have success, or we have to redo the parse for some reason */
if (MUST_RESTART(flags)) {
/* It's possible to write a regexp in ascii that represents Unicode
codepoints outside of the byte range, such as via \x{100}. If we
detect such a sequence we have to convert the entire pattern to utf8
and then recompile, as our sizing calculation will have been based
on 1 byte == 1 character, but we will need to use utf8 to encode
at least some part of the pattern, and therefore must convert the whole
thing.
-- dmq */
if (flags & NEED_UTF8) {
/* We have stored the offset of the final warning output so far.
* That must be adjusted. Any variant characters between the start
* of the pattern and this warning count for 2 bytes in the final,
* so just add them again */
if (UNLIKELY(RExC_latest_warn_offset > 0)) {
RExC_latest_warn_offset +=
variant_under_utf8_count((U8 *) exp, (U8 *) exp
+ RExC_latest_warn_offset);
}
S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen,
pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0);
DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Need to redo parse after upgrade\n"));
}
else {
DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Need to redo parse\n"));
}
if (ALL_PARENS_COUNTED) {
/* Make enough room for all the known parens, and zero it */
Renew(RExC_open_parens, RExC_total_parens, regnode_offset);
Zero(RExC_open_parens, RExC_total_parens, regnode_offset);
RExC_open_parens[0] = 1; /* +1 for REG_MAGIC */
Renew(RExC_close_parens, RExC_total_parens, regnode_offset);
Zero(RExC_close_parens, RExC_total_parens, regnode_offset);
}
else { /* Parse did not complete. Reinitialize the parentheses
structures */
RExC_total_parens = 0;
if (RExC_open_parens) {
Safefree(RExC_open_parens);
RExC_open_parens = NULL;
}
if (RExC_close_parens) {
Safefree(RExC_close_parens);
RExC_close_parens = NULL;
}
}
/* Clean up what we did in this parse */
SvREFCNT_dec_NN(RExC_rx_sv);
goto redo_parse;
}
/* Here, we have successfully parsed and generated the pattern's program
* for the regex engine. We are ready to finish things up and look for
* optimizations. */
/* Update the string to compile, with correct modifiers, etc */
set_regex_pv(pRExC_state, Rx);
RExC_rx->nparens = RExC_total_parens - 1;
/* Uses the upper 4 bits of the FLAGS field, so keep within that size */
if (RExC_whilem_seen > 15)
RExC_whilem_seen = 15;
DEBUG_PARSE_r({
Perl_re_printf( aTHX_
"Required size %" IVdf " nodes\n", (IV)RExC_size);
RExC_lastnum=0;
RExC_lastparse=NULL;
});
#ifdef RE_TRACK_PATTERN_OFFSETS
DEBUG_OFFSETS_r(Perl_re_printf( aTHX_
"%s %" UVuf " bytes for offset annotations.\n",
RExC_offsets ? "Got" : "Couldn't get",
(UV)((RExC_offsets[0] * 2 + 1))));
DEBUG_OFFSETS_r(if (RExC_offsets) {
const STRLEN len = RExC_offsets[0];
STRLEN i;
GET_RE_DEBUG_FLAGS_DECL;
Perl_re_printf( aTHX_
"Offsets: [%" UVuf "]\n\t", (UV)RExC_offsets[0]);
for (i = 1; i <= len; i++) {
if (RExC_offsets[i*2-1] || RExC_offsets[i*2])
Perl_re_printf( aTHX_ "%" UVuf ":%" UVuf "[%" UVuf "] ",
(UV)i, (UV)RExC_offsets[i*2-1], (UV)RExC_offsets[i*2]);
}
Perl_re_printf( aTHX_ "\n");
});
#else
SetProgLen(RExC_rxi,RExC_size);
#endif
DEBUG_OPTIMISE_r(
Perl_re_printf( aTHX_ "Starting post parse optimization\n");
);
/* XXXX To minimize changes to RE engine we always allocate
3-units-long substrs field. */
Newx(RExC_rx->substrs, 1, struct reg_substr_data);
if (RExC_recurse_count) {
Newx(RExC_recurse, RExC_recurse_count, regnode *);
SAVEFREEPV(RExC_recurse);
}
if (RExC_seen & REG_RECURSE_SEEN) {
/* Note, RExC_total_parens is 1 + the number of parens in a pattern.
* So its 1 if there are no parens. */
RExC_study_chunk_recursed_bytes= (RExC_total_parens >> 3) +
((RExC_total_parens & 0x07) != 0);
Newx(RExC_study_chunk_recursed,
RExC_study_chunk_recursed_bytes * RExC_total_parens, U8);
SAVEFREEPV(RExC_study_chunk_recursed);
}
reStudy:
RExC_rx->minlen = minlen = sawlookahead = sawplus = sawopen = sawminmod = 0;
DEBUG_r(
RExC_study_chunk_recursed_count= 0;
);
Zero(RExC_rx->substrs, 1, struct reg_substr_data);
if (RExC_study_chunk_recursed) {
Zero(RExC_study_chunk_recursed,
RExC_study_chunk_recursed_bytes * RExC_total_parens, U8);
}
#ifdef TRIE_STUDY_OPT
if (!restudied) {
StructCopy(&zero_scan_data, &data, scan_data_t);
copyRExC_state = RExC_state;
} else {
U32 seen=RExC_seen;
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "Restudying\n"));
RExC_state = copyRExC_state;
if (seen & REG_TOP_LEVEL_BRANCHES_SEEN)
RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN;
else
RExC_seen &= ~REG_TOP_LEVEL_BRANCHES_SEEN;
StructCopy(&zero_scan_data, &data, scan_data_t);
}
#else
StructCopy(&zero_scan_data, &data, scan_data_t);
#endif
/* Dig out information for optimizations. */
RExC_rx->extflags = RExC_flags; /* was pm_op */
/*dmq: removed as part of de-PMOP: pm->op_pmflags = RExC_flags; */
if (UTF)
SvUTF8_on(Rx); /* Unicode in it? */
RExC_rxi->regstclass = NULL;
if (RExC_naughty >= TOO_NAUGHTY) /* Probably an expensive pattern. */
RExC_rx->intflags |= PREGf_NAUGHTY;
scan = RExC_rxi->program + 1; /* First BRANCH. */
/* testing for BRANCH here tells us whether there is "must appear"
data in the pattern. If there is then we can use it for optimisations */
if (!(RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN)) { /* Only one top-level choice.
*/
SSize_t fake;
STRLEN longest_length[2];
regnode_ssc ch_class; /* pointed to by data */
int stclass_flag;
SSize_t last_close = 0; /* pointed to by data */
regnode *first= scan;
regnode *first_next= regnext(first);
int i;
/*
* Skip introductions and multiplicators >= 1
* so that we can extract the 'meat' of the pattern that must
* match in the large if() sequence following.
* NOTE that EXACT is NOT covered here, as it is normally
* picked up by the optimiser separately.
*
* This is unfortunate as the optimiser isnt handling lookahead
* properly currently.
*
*/
while ((OP(first) == OPEN && (sawopen = 1)) ||
/* An OR of *one* alternative - should not happen now. */
(OP(first) == BRANCH && OP(first_next) != BRANCH) ||
/* for now we can't handle lookbehind IFMATCH*/
(OP(first) == IFMATCH && !first->flags && (sawlookahead = 1)) ||
(OP(first) == PLUS) ||
(OP(first) == MINMOD) ||
/* An {n,m} with n>0 */
(PL_regkind[OP(first)] == CURLY && ARG1(first) > 0) ||
(OP(first) == NOTHING && PL_regkind[OP(first_next)] != END ))
{
/*
* the only op that could be a regnode is PLUS, all the rest
* will be regnode_1 or regnode_2.
*
* (yves doesn't think this is true)
*/
if (OP(first) == PLUS)
sawplus = 1;
else {
if (OP(first) == MINMOD)
sawminmod = 1;
first += regarglen[OP(first)];
}
first = NEXTOPER(first);
first_next= regnext(first);
}
/* Starting-point info. */
again:
DEBUG_PEEP("first:", first, 0, 0);
/* Ignore EXACT as we deal with it later. */
if (PL_regkind[OP(first)] == EXACT) {
if ( OP(first) == EXACT
|| OP(first) == EXACT_ONLY8
|| OP(first) == EXACTL)
{
NOOP; /* Empty, get anchored substr later. */
}
else
RExC_rxi->regstclass = first;
}
#ifdef TRIE_STCLASS
else if (PL_regkind[OP(first)] == TRIE &&
((reg_trie_data *)RExC_rxi->data->data[ ARG(first) ])->minlen>0)
{
/* this can happen only on restudy */
RExC_rxi->regstclass = construct_ahocorasick_from_trie(pRExC_state, (regnode *)first, 0);
}
#endif
else if (REGNODE_SIMPLE(OP(first)))
RExC_rxi->regstclass = first;
else if (PL_regkind[OP(first)] == BOUND ||
PL_regkind[OP(first)] == NBOUND)
RExC_rxi->regstclass = first;
else if (PL_regkind[OP(first)] == BOL) {
RExC_rx->intflags |= (OP(first) == MBOL
? PREGf_ANCH_MBOL
: PREGf_ANCH_SBOL);
first = NEXTOPER(first);
goto again;
}
else if (OP(first) == GPOS) {
RExC_rx->intflags |= PREGf_ANCH_GPOS;
first = NEXTOPER(first);
goto again;
}
else if ((!sawopen || !RExC_sawback) &&
!sawlookahead &&
(OP(first) == STAR &&
PL_regkind[OP(NEXTOPER(first))] == REG_ANY) &&
!(RExC_rx->intflags & PREGf_ANCH) && !pRExC_state->code_blocks)
{
/* turn .* into ^.* with an implied $*=1 */
const int type =
(OP(NEXTOPER(first)) == REG_ANY)
? PREGf_ANCH_MBOL
: PREGf_ANCH_SBOL;
RExC_rx->intflags |= (type | PREGf_IMPLICIT);
first = NEXTOPER(first);
goto again;
}
if (sawplus && !sawminmod && !sawlookahead
&& (!sawopen || !RExC_sawback)
&& !pRExC_state->code_blocks) /* May examine pos and $& */
/* x+ must match at the 1st pos of run of x's */
RExC_rx->intflags |= PREGf_SKIP;
/* Scan is after the zeroth branch, first is atomic matcher. */
#ifdef TRIE_STUDY_OPT
DEBUG_PARSE_r(
if (!restudied)
Perl_re_printf( aTHX_ "first at %" IVdf "\n",
(IV)(first - scan + 1))
);
#else
DEBUG_PARSE_r(
Perl_re_printf( aTHX_ "first at %" IVdf "\n",
(IV)(first - scan + 1))
);
#endif
/*
* If there's something expensive in the r.e., find the
* longest literal string that must appear and make it the
* regmust. Resolve ties in favor of later strings, since
* the regstart check works with the beginning of the r.e.
* and avoiding duplication strengthens checking. Not a
* strong reason, but sufficient in the absence of others.
* [Now we resolve ties in favor of the earlier string if
* it happens that c_offset_min has been invalidated, since the
* earlier string may buy us something the later one won't.]
*/
data.substrs[0].str = newSVpvs("");
data.substrs[1].str = newSVpvs("");
data.last_found = newSVpvs("");
data.cur_is_floating = 0; /* initially any found substring is fixed */
ENTER_with_name("study_chunk");
SAVEFREESV(data.substrs[0].str);
SAVEFREESV(data.substrs[1].str);
SAVEFREESV(data.last_found);
first = scan;
if (!RExC_rxi->regstclass) {
ssc_init(pRExC_state, &ch_class);
data.start_class = &ch_class;
stclass_flag = SCF_DO_STCLASS_AND;
} else /* XXXX Check for BOUND? */
stclass_flag = 0;
data.last_closep = &last_close;
DEBUG_RExC_seen();
/*
* MAIN ENTRY FOR study_chunk() FOR m/PATTERN/
* (NO top level branches)
*/
minlen = study_chunk(pRExC_state, &first, &minlen, &fake,
scan + RExC_size, /* Up to end */
&data, -1, 0, NULL,
SCF_DO_SUBSTR | SCF_WHILEM_VISITED_POS | stclass_flag
| (restudied ? SCF_TRIE_DOING_RESTUDY : 0),
0, TRUE);
CHECK_RESTUDY_GOTO_butfirst(LEAVE_with_name("study_chunk"));
if ( RExC_total_parens == 1 && !data.cur_is_floating
&& data.last_start_min == 0 && data.last_end > 0
&& !RExC_seen_zerolen
&& !(RExC_seen & REG_VERBARG_SEEN)
&& !(RExC_seen & REG_GPOS_SEEN)
){
RExC_rx->extflags |= RXf_CHECK_ALL;
}
scan_commit(pRExC_state, &data,&minlen, 0);
/* XXX this is done in reverse order because that's the way the
* code was before it was parameterised. Don't know whether it
* actually needs doing in reverse order. DAPM */
for (i = 1; i >= 0; i--) {
longest_length[i] = CHR_SVLEN(data.substrs[i].str);
if ( !( i
&& SvCUR(data.substrs[0].str) /* ok to leave SvCUR */
&& data.substrs[0].min_offset
== data.substrs[1].min_offset
&& SvCUR(data.substrs[0].str)
== SvCUR(data.substrs[1].str)
)
&& S_setup_longest (aTHX_ pRExC_state,
&(RExC_rx->substrs->data[i]),
&(data.substrs[i]),
longest_length[i]))
{
RExC_rx->substrs->data[i].min_offset =
data.substrs[i].min_offset - data.substrs[i].lookbehind;
RExC_rx->substrs->data[i].max_offset = data.substrs[i].max_offset;
/* Don't offset infinity */
if (data.substrs[i].max_offset < SSize_t_MAX)
RExC_rx->substrs->data[i].max_offset -= data.substrs[i].lookbehind;
SvREFCNT_inc_simple_void_NN(data.substrs[i].str);
}
else {
RExC_rx->substrs->data[i].substr = NULL;
RExC_rx->substrs->data[i].utf8_substr = NULL;
longest_length[i] = 0;
}
}
LEAVE_with_name("study_chunk");
if (RExC_rxi->regstclass
&& (OP(RExC_rxi->regstclass) == REG_ANY || OP(RExC_rxi->regstclass) == SANY))
RExC_rxi->regstclass = NULL;
if ((!(RExC_rx->substrs->data[0].substr || RExC_rx->substrs->data[0].utf8_substr)
|| RExC_rx->substrs->data[0].min_offset)
&& stclass_flag
&& ! (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING)
&& is_ssc_worth_it(pRExC_state, data.start_class))
{
const U32 n = add_data(pRExC_state, STR_WITH_LEN("f"));
ssc_finalize(pRExC_state, data.start_class);
Newx(RExC_rxi->data->data[n], 1, regnode_ssc);
StructCopy(data.start_class,
(regnode_ssc*)RExC_rxi->data->data[n],
regnode_ssc);
RExC_rxi->regstclass = (regnode*)RExC_rxi->data->data[n];
RExC_rx->intflags &= ~PREGf_SKIP; /* Used in find_byclass(). */
DEBUG_COMPILE_r({ SV *sv = sv_newmortal();
regprop(RExC_rx, sv, (regnode*)data.start_class, NULL, pRExC_state);
Perl_re_printf( aTHX_
"synthetic stclass \"%s\".\n",
SvPVX_const(sv));});
data.start_class = NULL;
}
/* A temporary algorithm prefers floated substr to fixed one of
* same length to dig more info. */
i = (longest_length[0] <= longest_length[1]);
RExC_rx->substrs->check_ix = i;
RExC_rx->check_end_shift = RExC_rx->substrs->data[i].end_shift;
RExC_rx->check_substr = RExC_rx->substrs->data[i].substr;
RExC_rx->check_utf8 = RExC_rx->substrs->data[i].utf8_substr;
RExC_rx->check_offset_min = RExC_rx->substrs->data[i].min_offset;
RExC_rx->check_offset_max = RExC_rx->substrs->data[i].max_offset;
if (!i && (RExC_rx->intflags & (PREGf_ANCH_SBOL|PREGf_ANCH_GPOS)))
RExC_rx->intflags |= PREGf_NOSCAN;
if ((RExC_rx->check_substr || RExC_rx->check_utf8) ) {
RExC_rx->extflags |= RXf_USE_INTUIT;
if (SvTAIL(RExC_rx->check_substr ? RExC_rx->check_substr : RExC_rx->check_utf8))
RExC_rx->extflags |= RXf_INTUIT_TAIL;
}
/* XXX Unneeded? dmq (shouldn't as this is handled elsewhere)
if ( (STRLEN)minlen < longest_length[1] )
minlen= longest_length[1];
if ( (STRLEN)minlen < longest_length[0] )
minlen= longest_length[0];
*/
}
else {
/* Several toplevels. Best we can is to set minlen. */
SSize_t fake;
regnode_ssc ch_class;
SSize_t last_close = 0;
DEBUG_PARSE_r(Perl_re_printf( aTHX_ "\nMulti Top Level\n"));
scan = RExC_rxi->program + 1;
ssc_init(pRExC_state, &ch_class);
data.start_class = &ch_class;
data.last_closep = &last_close;
DEBUG_RExC_seen();
/*
* MAIN ENTRY FOR study_chunk() FOR m/P1|P2|.../
* (patterns WITH top level branches)
*/
minlen = study_chunk(pRExC_state,
&scan, &minlen, &fake, scan + RExC_size, &data, -1, 0, NULL,
SCF_DO_STCLASS_AND|SCF_WHILEM_VISITED_POS|(restudied
? SCF_TRIE_DOING_RESTUDY
: 0),
0, TRUE);
CHECK_RESTUDY_GOTO_butfirst(NOOP);
RExC_rx->check_substr = NULL;
RExC_rx->check_utf8 = NULL;
RExC_rx->substrs->data[0].substr = NULL;
RExC_rx->substrs->data[0].utf8_substr = NULL;
RExC_rx->substrs->data[1].substr = NULL;
RExC_rx->substrs->data[1].utf8_substr = NULL;
if (! (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING)
&& is_ssc_worth_it(pRExC_state, data.start_class))
{
const U32 n = add_data(pRExC_state, STR_WITH_LEN("f"));
ssc_finalize(pRExC_state, data.start_class);
Newx(RExC_rxi->data->data[n], 1, regnode_ssc);
StructCopy(data.start_class,
(regnode_ssc*)RExC_rxi->data->data[n],
regnode_ssc);
RExC_rxi->regstclass = (regnode*)RExC_rxi->data->data[n];
RExC_rx->intflags &= ~PREGf_SKIP; /* Used in find_byclass(). */
DEBUG_COMPILE_r({ SV* sv = sv_newmortal();
regprop(RExC_rx, sv, (regnode*)data.start_class, NULL, pRExC_state);
Perl_re_printf( aTHX_
"synthetic stclass \"%s\".\n",
SvPVX_const(sv));});
data.start_class = NULL;
}
}
if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) {
RExC_rx->extflags |= RXf_UNBOUNDED_QUANTIFIER_SEEN;
RExC_rx->maxlen = REG_INFTY;
}
else {
RExC_rx->maxlen = RExC_maxlen;
}
/* Guard against an embedded (?=) or (?<=) with a longer minlen than
the "real" pattern. */
DEBUG_OPTIMISE_r({
Perl_re_printf( aTHX_ "minlen: %" IVdf " RExC_rx->minlen:%" IVdf " maxlen:%" IVdf "\n",
(IV)minlen, (IV)RExC_rx->minlen, (IV)RExC_maxlen);
});
RExC_rx->minlenret = minlen;
if (RExC_rx->minlen < minlen)
RExC_rx->minlen = minlen;
if (RExC_seen & REG_RECURSE_SEEN ) {
RExC_rx->intflags |= PREGf_RECURSE_SEEN;
Newx(RExC_rx->recurse_locinput, RExC_rx->nparens + 1, char *);
}
if (RExC_seen & REG_GPOS_SEEN)
RExC_rx->intflags |= PREGf_GPOS_SEEN;
if (RExC_seen & REG_LOOKBEHIND_SEEN)
RExC_rx->extflags |= RXf_NO_INPLACE_SUBST; /* inplace might break the
lookbehind */
if (pRExC_state->code_blocks)
RExC_rx->extflags |= RXf_EVAL_SEEN;
if (RExC_seen & REG_VERBARG_SEEN)
{
RExC_rx->intflags |= PREGf_VERBARG_SEEN;
RExC_rx->extflags |= RXf_NO_INPLACE_SUBST; /* don't understand this! Yves */
}
if (RExC_seen & REG_CUTGROUP_SEEN)
RExC_rx->intflags |= PREGf_CUTGROUP_SEEN;
if (pm_flags & PMf_USE_RE_EVAL)
RExC_rx->intflags |= PREGf_USE_RE_EVAL;
if (RExC_paren_names)
RXp_PAREN_NAMES(RExC_rx) = MUTABLE_HV(SvREFCNT_inc(RExC_paren_names));
else
RXp_PAREN_NAMES(RExC_rx) = NULL;
/* If we have seen an anchor in our pattern then we set the extflag RXf_IS_ANCHORED
* so it can be used in pp.c */
if (RExC_rx->intflags & PREGf_ANCH)
RExC_rx->extflags |= RXf_IS_ANCHORED;
{
/* this is used to identify "special" patterns that might result
* in Perl NOT calling the regex engine and instead doing the match "itself",
* particularly special cases in split//. By having the regex compiler
* do this pattern matching at a regop level (instead of by inspecting the pattern)
* we avoid weird issues with equivalent patterns resulting in different behavior,
* AND we allow non Perl engines to get the same optimizations by the setting the
* flags appropriately - Yves */
regnode *first = RExC_rxi->program + 1;
U8 fop = OP(first);
regnode *next = regnext(first);
U8 nop = OP(next);
if (PL_regkind[fop] == NOTHING && nop == END)
RExC_rx->extflags |= RXf_NULL;
else if ((fop == MBOL || (fop == SBOL && !first->flags)) && nop == END)
/* when fop is SBOL first->flags will be true only when it was
* produced by parsing /\A/, and not when parsing /^/. This is
* very important for the split code as there we want to
* treat /^/ as /^/m, but we do not want to treat /\A/ as /^/m.
* See rt #122761 for more details. -- Yves */
RExC_rx->extflags |= RXf_START_ONLY;
else if (fop == PLUS
&& PL_regkind[nop] == POSIXD && FLAGS(next) == _CC_SPACE
&& nop == END)
RExC_rx->extflags |= RXf_WHITE;
else if ( RExC_rx->extflags & RXf_SPLIT
&& (fop == EXACT || fop == EXACT_ONLY8 || fop == EXACTL)
&& STR_LEN(first) == 1
&& *(STRING(first)) == ' '
&& nop == END )
RExC_rx->extflags |= (RXf_SKIPWHITE|RXf_WHITE);
}
if (RExC_contains_locale) {
RXp_EXTFLAGS(RExC_rx) |= RXf_TAINTED;
}
#ifdef DEBUGGING
if (RExC_paren_names) {
RExC_rxi->name_list_idx = add_data( pRExC_state, STR_WITH_LEN("a"));
RExC_rxi->data->data[RExC_rxi->name_list_idx]
= (void*)SvREFCNT_inc(RExC_paren_name_list);
} else
#endif
RExC_rxi->name_list_idx = 0;
while ( RExC_recurse_count > 0 ) {
const regnode *scan = RExC_recurse[ --RExC_recurse_count ];
/*
* This data structure is set up in study_chunk() and is used
* to calculate the distance between a GOSUB regopcode and
* the OPEN/CURLYM (CURLYM's are special and can act like OPEN's)
* it refers to.
*
* If for some reason someone writes code that optimises
* away a GOSUB opcode then the assert should be changed to
* an if(scan) to guard the ARG2L_SET() - Yves
*
*/
assert(scan && OP(scan) == GOSUB);
ARG2L_SET( scan, RExC_open_parens[ARG(scan)] - REGNODE_OFFSET(scan));
}
Newxz(RExC_rx->offs, RExC_total_parens, regexp_paren_pair);
/* assume we don't need to swap parens around before we match */
DEBUG_TEST_r({
Perl_re_printf( aTHX_ "study_chunk_recursed_count: %lu\n",
(unsigned long)RExC_study_chunk_recursed_count);
});
DEBUG_DUMP_r({
DEBUG_RExC_seen();
Perl_re_printf( aTHX_ "Final program:\n");
regdump(RExC_rx);
});
if (RExC_open_parens) {
Safefree(RExC_open_parens);
RExC_open_parens = NULL;
}
if (RExC_close_parens) {
Safefree(RExC_close_parens);
RExC_close_parens = NULL;
}
#ifdef USE_ITHREADS
/* under ithreads the ?pat? PMf_USED flag on the pmop is simulated
* by setting the regexp SV to readonly-only instead. If the
* pattern's been recompiled, the USEDness should remain. */
if (old_re && SvREADONLY(old_re))
SvREADONLY_on(Rx);
#endif
return Rx;
}
SV*
Perl_reg_named_buff(pTHX_ REGEXP * const rx, SV * const key, SV * const value,
const U32 flags)
{
PERL_ARGS_ASSERT_REG_NAMED_BUFF;
PERL_UNUSED_ARG(value);
if (flags & RXapif_FETCH) {
return reg_named_buff_fetch(rx, key, flags);
} else if (flags & (RXapif_STORE | RXapif_DELETE | RXapif_CLEAR)) {
Perl_croak_no_modify();
return NULL;
} else if (flags & RXapif_EXISTS) {
return reg_named_buff_exists(rx, key, flags)
? &PL_sv_yes
: &PL_sv_no;
} else if (flags & RXapif_REGNAMES) {
return reg_named_buff_all(rx, flags);
} else if (flags & (RXapif_SCALAR | RXapif_REGNAMES_COUNT)) {
return reg_named_buff_scalar(rx, flags);
} else {
Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff", (int)flags);
return NULL;
}
}
SV*
Perl_reg_named_buff_iter(pTHX_ REGEXP * const rx, const SV * const lastkey,
const U32 flags)
{
PERL_ARGS_ASSERT_REG_NAMED_BUFF_ITER;
PERL_UNUSED_ARG(lastkey);
if (flags & RXapif_FIRSTKEY)
return reg_named_buff_firstkey(rx, flags);
else if (flags & RXapif_NEXTKEY)
return reg_named_buff_nextkey(rx, flags);
else {
Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_iter",
(int)flags);
return NULL;
}
}
SV*
Perl_reg_named_buff_fetch(pTHX_ REGEXP * const r, SV * const namesv,
const U32 flags)
{
SV *ret;
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_FETCH;
if (rx && RXp_PAREN_NAMES(rx)) {
HE *he_str = hv_fetch_ent( RXp_PAREN_NAMES(rx), namesv, 0, 0 );
if (he_str) {
IV i;
SV* sv_dat=HeVAL(he_str);
I32 *nums=(I32*)SvPVX(sv_dat);
AV * const retarray = (flags & RXapif_ALL) ? newAV() : NULL;
for ( i=0; i<SvIVX(sv_dat); i++ ) {
if ((I32)(rx->nparens) >= nums[i]
&& rx->offs[nums[i]].start != -1
&& rx->offs[nums[i]].end != -1)
{
ret = newSVpvs("");
CALLREG_NUMBUF_FETCH(r, nums[i], ret);
if (!retarray)
return ret;
} else {
if (retarray)
ret = newSVsv(&PL_sv_undef);
}
if (retarray)
av_push(retarray, ret);
}
if (retarray)
return newRV_noinc(MUTABLE_SV(retarray));
}
}
return NULL;
}
bool
Perl_reg_named_buff_exists(pTHX_ REGEXP * const r, SV * const key,
const U32 flags)
{
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_EXISTS;
if (rx && RXp_PAREN_NAMES(rx)) {
if (flags & RXapif_ALL) {
return hv_exists_ent(RXp_PAREN_NAMES(rx), key, 0);
} else {
SV *sv = CALLREG_NAMED_BUFF_FETCH(r, key, flags);
if (sv) {
SvREFCNT_dec_NN(sv);
return TRUE;
} else {
return FALSE;
}
}
} else {
return FALSE;
}
}
SV*
Perl_reg_named_buff_firstkey(pTHX_ REGEXP * const r, const U32 flags)
{
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_FIRSTKEY;
if ( rx && RXp_PAREN_NAMES(rx) ) {
(void)hv_iterinit(RXp_PAREN_NAMES(rx));
return CALLREG_NAMED_BUFF_NEXTKEY(r, NULL, flags & ~RXapif_FIRSTKEY);
} else {
return FALSE;
}
}
SV*
Perl_reg_named_buff_nextkey(pTHX_ REGEXP * const r, const U32 flags)
{
struct regexp *const rx = ReANY(r);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REG_NAMED_BUFF_NEXTKEY;
if (rx && RXp_PAREN_NAMES(rx)) {
HV *hv = RXp_PAREN_NAMES(rx);
HE *temphe;
while ( (temphe = hv_iternext_flags(hv, 0)) ) {
IV i;
IV parno = 0;
SV* sv_dat = HeVAL(temphe);
I32 *nums = (I32*)SvPVX(sv_dat);
for ( i = 0; i < SvIVX(sv_dat); i++ ) {
if ((I32)(rx->lastparen) >= nums[i] &&
rx->offs[nums[i]].start != -1 &&
rx->offs[nums[i]].end != -1)
{
parno = nums[i];
break;
}
}
if (parno || flags & RXapif_ALL) {
return newSVhek(HeKEY_hek(temphe));
}
}
}
return NULL;
}
SV*
Perl_reg_named_buff_scalar(pTHX_ REGEXP * const r, const U32 flags)
{
SV *ret;
AV *av;
SSize_t length;
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_SCALAR;
if (rx && RXp_PAREN_NAMES(rx)) {
if (flags & (RXapif_ALL | RXapif_REGNAMES_COUNT)) {
return newSViv(HvTOTALKEYS(RXp_PAREN_NAMES(rx)));
} else if (flags & RXapif_ONE) {
ret = CALLREG_NAMED_BUFF_ALL(r, (flags | RXapif_REGNAMES));
av = MUTABLE_AV(SvRV(ret));
length = av_tindex(av);
SvREFCNT_dec_NN(ret);
return newSViv(length + 1);
} else {
Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_scalar",
(int)flags);
return NULL;
}
}
return &PL_sv_undef;
}
SV*
Perl_reg_named_buff_all(pTHX_ REGEXP * const r, const U32 flags)
{
struct regexp *const rx = ReANY(r);
AV *av = newAV();
PERL_ARGS_ASSERT_REG_NAMED_BUFF_ALL;
if (rx && RXp_PAREN_NAMES(rx)) {
HV *hv= RXp_PAREN_NAMES(rx);
HE *temphe;
(void)hv_iterinit(hv);
while ( (temphe = hv_iternext_flags(hv, 0)) ) {
IV i;
IV parno = 0;
SV* sv_dat = HeVAL(temphe);
I32 *nums = (I32*)SvPVX(sv_dat);
for ( i = 0; i < SvIVX(sv_dat); i++ ) {
if ((I32)(rx->lastparen) >= nums[i] &&
rx->offs[nums[i]].start != -1 &&
rx->offs[nums[i]].end != -1)
{
parno = nums[i];
break;
}
}
if (parno || flags & RXapif_ALL) {
av_push(av, newSVhek(HeKEY_hek(temphe)));
}
}
}
return newRV_noinc(MUTABLE_SV(av));
}
void
Perl_reg_numbered_buff_fetch(pTHX_ REGEXP * const r, const I32 paren,
SV * const sv)
{
struct regexp *const rx = ReANY(r);
char *s = NULL;
SSize_t i = 0;
SSize_t s1, t1;
I32 n = paren;
PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_FETCH;
if ( n == RX_BUFF_IDX_CARET_PREMATCH
|| n == RX_BUFF_IDX_CARET_FULLMATCH
|| n == RX_BUFF_IDX_CARET_POSTMATCH
)
{
bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY);
if (!keepcopy) {
/* on something like
* $r = qr/.../;
* /$qr/p;
* the KEEPCOPY is set on the PMOP rather than the regex */
if (PL_curpm && r == PM_GETRE(PL_curpm))
keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY);
}
if (!keepcopy)
goto ret_undef;
}
if (!rx->subbeg)
goto ret_undef;
if (n == RX_BUFF_IDX_CARET_FULLMATCH)
/* no need to distinguish between them any more */
n = RX_BUFF_IDX_FULLMATCH;
if ((n == RX_BUFF_IDX_PREMATCH || n == RX_BUFF_IDX_CARET_PREMATCH)
&& rx->offs[0].start != -1)
{
/* $`, ${^PREMATCH} */
i = rx->offs[0].start;
s = rx->subbeg;
}
else
if ((n == RX_BUFF_IDX_POSTMATCH || n == RX_BUFF_IDX_CARET_POSTMATCH)
&& rx->offs[0].end != -1)
{
/* $', ${^POSTMATCH} */
s = rx->subbeg - rx->suboffset + rx->offs[0].end;
i = rx->sublen + rx->suboffset - rx->offs[0].end;
}
else
if ( 0 <= n && n <= (I32)rx->nparens &&
(s1 = rx->offs[n].start) != -1 &&
(t1 = rx->offs[n].end) != -1)
{
/* $&, ${^MATCH}, $1 ... */
i = t1 - s1;
s = rx->subbeg + s1 - rx->suboffset;
} else {
goto ret_undef;
}
assert(s >= rx->subbeg);
assert((STRLEN)rx->sublen >= (STRLEN)((s - rx->subbeg) + i) );
if (i >= 0) {
#ifdef NO_TAINT_SUPPORT
sv_setpvn(sv, s, i);
#else
const int oldtainted = TAINT_get;
TAINT_NOT;
sv_setpvn(sv, s, i);
TAINT_set(oldtainted);
#endif
if (RXp_MATCH_UTF8(rx))
SvUTF8_on(sv);
else
SvUTF8_off(sv);
if (TAINTING_get) {
if (RXp_MATCH_TAINTED(rx)) {
if (SvTYPE(sv) >= SVt_PVMG) {
MAGIC* const mg = SvMAGIC(sv);
MAGIC* mgt;
TAINT;
SvMAGIC_set(sv, mg->mg_moremagic);
SvTAINT(sv);
if ((mgt = SvMAGIC(sv))) {
mg->mg_moremagic = mgt;
SvMAGIC_set(sv, mg);
}
} else {
TAINT;
SvTAINT(sv);
}
} else
SvTAINTED_off(sv);
}
} else {
ret_undef:
sv_set_undef(sv);
return;
}
}
void
Perl_reg_numbered_buff_store(pTHX_ REGEXP * const rx, const I32 paren,
SV const * const value)
{
PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_STORE;
PERL_UNUSED_ARG(rx);
PERL_UNUSED_ARG(paren);
PERL_UNUSED_ARG(value);
if (!PL_localizing)
Perl_croak_no_modify();
}
I32
Perl_reg_numbered_buff_length(pTHX_ REGEXP * const r, const SV * const sv,
const I32 paren)
{
struct regexp *const rx = ReANY(r);
I32 i;
I32 s1, t1;
PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_LENGTH;
if ( paren == RX_BUFF_IDX_CARET_PREMATCH
|| paren == RX_BUFF_IDX_CARET_FULLMATCH
|| paren == RX_BUFF_IDX_CARET_POSTMATCH
)
{
bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY);
if (!keepcopy) {
/* on something like
* $r = qr/.../;
* /$qr/p;
* the KEEPCOPY is set on the PMOP rather than the regex */
if (PL_curpm && r == PM_GETRE(PL_curpm))
keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY);
}
if (!keepcopy)
goto warn_undef;
}
/* Some of this code was originally in C<Perl_magic_len> in F<mg.c> */
switch (paren) {
case RX_BUFF_IDX_CARET_PREMATCH: /* ${^PREMATCH} */
case RX_BUFF_IDX_PREMATCH: /* $` */
if (rx->offs[0].start != -1) {
i = rx->offs[0].start;
if (i > 0) {
s1 = 0;
t1 = i;
goto getlen;
}
}
return 0;
case RX_BUFF_IDX_CARET_POSTMATCH: /* ${^POSTMATCH} */
case RX_BUFF_IDX_POSTMATCH: /* $' */
if (rx->offs[0].end != -1) {
i = rx->sublen - rx->offs[0].end;
if (i > 0) {
s1 = rx->offs[0].end;
t1 = rx->sublen;
goto getlen;
}
}
return 0;
default: /* $& / ${^MATCH}, $1, $2, ... */
if (paren <= (I32)rx->nparens &&
(s1 = rx->offs[paren].start) != -1 &&
(t1 = rx->offs[paren].end) != -1)
{
i = t1 - s1;
goto getlen;
} else {
warn_undef:
if (ckWARN(WARN_UNINITIALIZED))
report_uninit((const SV *)sv);
return 0;
}
}
getlen:
if (i > 0 && RXp_MATCH_UTF8(rx)) {
const char * const s = rx->subbeg - rx->suboffset + s1;
const U8 *ep;
STRLEN el;
i = t1 - s1;
if (is_utf8_string_loclen((U8*)s, i, &ep, &el))
i = el;
}
return i;
}
SV*
Perl_reg_qr_package(pTHX_ REGEXP * const rx)
{
PERL_ARGS_ASSERT_REG_QR_PACKAGE;
PERL_UNUSED_ARG(rx);
if (0)
return NULL;
else
return newSVpvs("Regexp");
}
/* Scans the name of a named buffer from the pattern.
* If flags is REG_RSN_RETURN_NULL returns null.
* If flags is REG_RSN_RETURN_NAME returns an SV* containing the name
* If flags is REG_RSN_RETURN_DATA returns the data SV* corresponding
* to the parsed name as looked up in the RExC_paren_names hash.
* If there is an error throws a vFAIL().. type exception.
*/
#define REG_RSN_RETURN_NULL 0
#define REG_RSN_RETURN_NAME 1
#define REG_RSN_RETURN_DATA 2
STATIC SV*
S_reg_scan_name(pTHX_ RExC_state_t *pRExC_state, U32 flags)
{
char *name_start = RExC_parse;
SV* sv_name;
PERL_ARGS_ASSERT_REG_SCAN_NAME;
assert (RExC_parse <= RExC_end);
if (RExC_parse == RExC_end) NOOP;
else if (isIDFIRST_lazy_if_safe(RExC_parse, RExC_end, UTF)) {
/* Note that the code here assumes well-formed UTF-8. Skip IDFIRST by
* using do...while */
if (UTF)
do {
RExC_parse += UTF8SKIP(RExC_parse);
} while ( RExC_parse < RExC_end
&& isWORDCHAR_utf8_safe((U8*)RExC_parse, (U8*) RExC_end));
else
do {
RExC_parse++;
} while (RExC_parse < RExC_end && isWORDCHAR(*RExC_parse));
} else {
RExC_parse++; /* so the <- from the vFAIL is after the offending
character */
vFAIL("Group name must start with a non-digit word character");
}
sv_name = newSVpvn_flags(name_start, (int)(RExC_parse - name_start),
SVs_TEMP | (UTF ? SVf_UTF8 : 0));
if ( flags == REG_RSN_RETURN_NAME)
return sv_name;
else if (flags==REG_RSN_RETURN_DATA) {
HE *he_str = NULL;
SV *sv_dat = NULL;
if ( ! sv_name ) /* should not happen*/
Perl_croak(aTHX_ "panic: no svname in reg_scan_name");
if (RExC_paren_names)
he_str = hv_fetch_ent( RExC_paren_names, sv_name, 0, 0 );
if ( he_str )
sv_dat = HeVAL(he_str);
if ( ! sv_dat ) { /* Didn't find group */
/* It might be a forward reference; we can't fail until we
* know, by completing the parse to get all the groups, and
* then reparsing */
if (ALL_PARENS_COUNTED) {
vFAIL("Reference to nonexistent named group");
}
else {
REQUIRE_PARENS_PASS;
}
}
return sv_dat;
}
Perl_croak(aTHX_ "panic: bad flag %lx in reg_scan_name",
(unsigned long) flags);
}
#define DEBUG_PARSE_MSG(funcname) DEBUG_PARSE_r({ \
if (RExC_lastparse!=RExC_parse) { \
Perl_re_printf( aTHX_ "%s", \
Perl_pv_pretty(aTHX_ RExC_mysv1, RExC_parse, \
RExC_end - RExC_parse, 16, \
"", "", \
PERL_PV_ESCAPE_UNI_DETECT | \
PERL_PV_PRETTY_ELLIPSES | \
PERL_PV_PRETTY_LTGT | \
PERL_PV_ESCAPE_RE | \
PERL_PV_PRETTY_EXACTSIZE \
) \
); \
} else \
Perl_re_printf( aTHX_ "%16s",""); \
\
if (RExC_lastnum!=RExC_emit) \
Perl_re_printf( aTHX_ "|%4d", RExC_emit); \
else \
Perl_re_printf( aTHX_ "|%4s",""); \
Perl_re_printf( aTHX_ "|%*s%-4s", \
(int)((depth*2)), "", \
(funcname) \
); \
RExC_lastnum=RExC_emit; \
RExC_lastparse=RExC_parse; \
})
#define DEBUG_PARSE(funcname) DEBUG_PARSE_r({ \
DEBUG_PARSE_MSG((funcname)); \
Perl_re_printf( aTHX_ "%4s","\n"); \
})
#define DEBUG_PARSE_FMT(funcname,fmt,args) DEBUG_PARSE_r({\
DEBUG_PARSE_MSG((funcname)); \
Perl_re_printf( aTHX_ fmt "\n",args); \
})
/* This section of code defines the inversion list object and its methods. The
* interfaces are highly subject to change, so as much as possible is static to
* this file. An inversion list is here implemented as a malloc'd C UV array
* as an SVt_INVLIST scalar.
*
* An inversion list for Unicode is an array of code points, sorted by ordinal
* number. Each element gives the code point that begins a range that extends
* up-to but not including the code point given by the next element. The final
* element gives the first code point of a range that extends to the platform's
* infinity. The even-numbered elements (invlist[0], invlist[2], invlist[4],
* ...) give ranges whose code points are all in the inversion list. We say
* that those ranges are in the set. The odd-numbered elements give ranges
* whose code points are not in the inversion list, and hence not in the set.
* Thus, element [0] is the first code point in the list. Element [1]
* is the first code point beyond that not in the list; and element [2] is the
* first code point beyond that that is in the list. In other words, the first
* range is invlist[0]..(invlist[1]-1), and all code points in that range are
* in the inversion list. The second range is invlist[1]..(invlist[2]-1), and
* all code points in that range are not in the inversion list. The third
* range invlist[2]..(invlist[3]-1) gives code points that are in the inversion
* list, and so forth. Thus every element whose index is divisible by two
* gives the beginning of a range that is in the list, and every element whose
* index is not divisible by two gives the beginning of a range not in the
* list. If the final element's index is divisible by two, the inversion list
* extends to the platform's infinity; otherwise the highest code point in the
* inversion list is the contents of that element minus 1.
*
* A range that contains just a single code point N will look like
* invlist[i] == N
* invlist[i+1] == N+1
*
* If N is UV_MAX (the highest representable code point on the machine), N+1 is
* impossible to represent, so element [i+1] is omitted. The single element
* inversion list
* invlist[0] == UV_MAX
* contains just UV_MAX, but is interpreted as matching to infinity.
*
* Taking the complement (inverting) an inversion list is quite simple, if the
* first element is 0, remove it; otherwise add a 0 element at the beginning.
* This implementation reserves an element at the beginning of each inversion
* list to always contain 0; there is an additional flag in the header which
* indicates if the list begins at the 0, or is offset to begin at the next
* element. This means that the inversion list can be inverted without any
* copying; just flip the flag.
*
* More about inversion lists can be found in "Unicode Demystified"
* Chapter 13 by Richard Gillam, published by Addison-Wesley.
*
* The inversion list data structure is currently implemented as an SV pointing
* to an array of UVs that the SV thinks are bytes. This allows us to have an
* array of UV whose memory management is automatically handled by the existing
* facilities for SV's.
*
* Some of the methods should always be private to the implementation, and some
* should eventually be made public */
/* The header definitions are in F<invlist_inline.h> */
#ifndef PERL_IN_XSUB_RE
PERL_STATIC_INLINE UV*
S__invlist_array_init(SV* const invlist, const bool will_have_0)
{
/* Returns a pointer to the first element in the inversion list's array.
* This is called upon initialization of an inversion list. Where the
* array begins depends on whether the list has the code point U+0000 in it
* or not. The other parameter tells it whether the code that follows this
* call is about to put a 0 in the inversion list or not. The first
* element is either the element reserved for 0, if TRUE, or the element
* after it, if FALSE */
bool* offset = get_invlist_offset_addr(invlist);
UV* zero_addr = (UV *) SvPVX(invlist);
PERL_ARGS_ASSERT__INVLIST_ARRAY_INIT;
/* Must be empty */
assert(! _invlist_len(invlist));
*zero_addr = 0;
/* 1^1 = 0; 1^0 = 1 */
*offset = 1 ^ will_have_0;
return zero_addr + *offset;
}
PERL_STATIC_INLINE void
S_invlist_set_len(pTHX_ SV* const invlist, const UV len, const bool offset)
{
/* Sets the current number of elements stored in the inversion list.
* Updates SvCUR correspondingly */
PERL_UNUSED_CONTEXT;
PERL_ARGS_ASSERT_INVLIST_SET_LEN;
assert(is_invlist(invlist));
SvCUR_set(invlist,
(len == 0)
? 0
: TO_INTERNAL_SIZE(len + offset));
assert(SvLEN(invlist) == 0 || SvCUR(invlist) <= SvLEN(invlist));
}
STATIC void
S_invlist_replace_list_destroys_src(pTHX_ SV * dest, SV * src)
{
/* Replaces the inversion list in 'dest' with the one from 'src'. It
* steals the list from 'src', so 'src' is made to have a NULL list. This
* is similar to what SvSetMagicSV() would do, if it were implemented on
* inversion lists, though this routine avoids a copy */
const UV src_len = _invlist_len(src);
const bool src_offset = *get_invlist_offset_addr(src);
const STRLEN src_byte_len = SvLEN(src);
char * array = SvPVX(src);
const int oldtainted = TAINT_get;
PERL_ARGS_ASSERT_INVLIST_REPLACE_LIST_DESTROYS_SRC;
assert(is_invlist(src));
assert(is_invlist(dest));
assert(! invlist_is_iterating(src));
assert(SvCUR(src) == 0 || SvCUR(src) < SvLEN(src));
/* Make sure it ends in the right place with a NUL, as our inversion list
* manipulations aren't careful to keep this true, but sv_usepvn_flags()
* asserts it */
array[src_byte_len - 1] = '\0';
TAINT_NOT; /* Otherwise it breaks */
sv_usepvn_flags(dest,
(char *) array,
src_byte_len - 1,
/* This flag is documented to cause a copy to be avoided */
SV_HAS_TRAILING_NUL);
TAINT_set(oldtainted);
SvPV_set(src, 0);
SvLEN_set(src, 0);
SvCUR_set(src, 0);
/* Finish up copying over the other fields in an inversion list */
*get_invlist_offset_addr(dest) = src_offset;
invlist_set_len(dest, src_len, src_offset);
*get_invlist_previous_index_addr(dest) = 0;
invlist_iterfinish(dest);
}
PERL_STATIC_INLINE IV*
S_get_invlist_previous_index_addr(SV* invlist)
{
/* Return the address of the IV that is reserved to hold the cached index
* */
PERL_ARGS_ASSERT_GET_INVLIST_PREVIOUS_INDEX_ADDR;
assert(is_invlist(invlist));
return &(((XINVLIST*) SvANY(invlist))->prev_index);
}
PERL_STATIC_INLINE IV
S_invlist_previous_index(SV* const invlist)
{
/* Returns cached index of previous search */
PERL_ARGS_ASSERT_INVLIST_PREVIOUS_INDEX;
return *get_invlist_previous_index_addr(invlist);
}
PERL_STATIC_INLINE void
S_invlist_set_previous_index(SV* const invlist, const IV index)
{
/* Caches <index> for later retrieval */
PERL_ARGS_ASSERT_INVLIST_SET_PREVIOUS_INDEX;
assert(index == 0 || index < (int) _invlist_len(invlist));
*get_invlist_previous_index_addr(invlist) = index;
}
PERL_STATIC_INLINE void
S_invlist_trim(SV* invlist)
{
/* Free the not currently-being-used space in an inversion list */
/* But don't free up the space needed for the 0 UV that is always at the
* beginning of the list, nor the trailing NUL */
const UV min_size = TO_INTERNAL_SIZE(1) + 1;
PERL_ARGS_ASSERT_INVLIST_TRIM;
assert(is_invlist(invlist));
SvPV_renew(invlist, MAX(min_size, SvCUR(invlist) + 1));
}
PERL_STATIC_INLINE void
S_invlist_clear(pTHX_ SV* invlist) /* Empty the inversion list */
{
PERL_ARGS_ASSERT_INVLIST_CLEAR;
assert(is_invlist(invlist));
invlist_set_len(invlist, 0, 0);
invlist_trim(invlist);
}
#endif /* ifndef PERL_IN_XSUB_RE */
PERL_STATIC_INLINE bool
S_invlist_is_iterating(SV* const invlist)
{
PERL_ARGS_ASSERT_INVLIST_IS_ITERATING;
return *(get_invlist_iter_addr(invlist)) < (STRLEN) UV_MAX;
}
#ifndef PERL_IN_XSUB_RE
PERL_STATIC_INLINE UV
S_invlist_max(SV* const invlist)
{
/* Returns the maximum number of elements storable in the inversion list's
* array, without having to realloc() */
PERL_ARGS_ASSERT_INVLIST_MAX;
assert(is_invlist(invlist));
/* Assumes worst case, in which the 0 element is not counted in the
* inversion list, so subtracts 1 for that */
return SvLEN(invlist) == 0 /* This happens under _new_invlist_C_array */
? FROM_INTERNAL_SIZE(SvCUR(invlist)) - 1
: FROM_INTERNAL_SIZE(SvLEN(invlist)) - 1;
}
STATIC void
S_initialize_invlist_guts(pTHX_ SV* invlist, const Size_t initial_size)
{
PERL_ARGS_ASSERT_INITIALIZE_INVLIST_GUTS;
/* First 1 is in case the zero element isn't in the list; second 1 is for
* trailing NUL */
SvGROW(invlist, TO_INTERNAL_SIZE(initial_size + 1) + 1);
invlist_set_len(invlist, 0, 0);
/* Force iterinit() to be used to get iteration to work */
invlist_iterfinish(invlist);
*get_invlist_previous_index_addr(invlist) = 0;
}
SV*
Perl__new_invlist(pTHX_ IV initial_size)
{
/* Return a pointer to a newly constructed inversion list, with enough
* space to store 'initial_size' elements. If that number is negative, a
* system default is used instead */
SV* new_list;
if (initial_size < 0) {
initial_size = 10;
}
new_list = newSV_type(SVt_INVLIST);
initialize_invlist_guts(new_list, initial_size);
return new_list;
}
SV*
Perl__new_invlist_C_array(pTHX_ const UV* const list)
{
/* Return a pointer to a newly constructed inversion list, initialized to
* point to <list>, which has to be in the exact correct inversion list
* form, including internal fields. Thus this is a dangerous routine that
* should not be used in the wrong hands. The passed in 'list' contains
* several header fields at the beginning that are not part of the
* inversion list body proper */
const STRLEN length = (STRLEN) list[0];
const UV version_id = list[1];
const bool offset = cBOOL(list[2]);
#define HEADER_LENGTH 3
/* If any of the above changes in any way, you must change HEADER_LENGTH
* (if appropriate) and regenerate INVLIST_VERSION_ID by running
* perl -E 'say int(rand 2**31-1)'
*/
#define INVLIST_VERSION_ID 148565664 /* This is a combination of a version and
data structure type, so that one being
passed in can be validated to be an
inversion list of the correct vintage.
*/
SV* invlist = newSV_type(SVt_INVLIST);
PERL_ARGS_ASSERT__NEW_INVLIST_C_ARRAY;
if (version_id != INVLIST_VERSION_ID) {
Perl_croak(aTHX_ "panic: Incorrect version for previously generated inversion list");
}
/* The generated array passed in includes header elements that aren't part
* of the list proper, so start it just after them */
SvPV_set(invlist, (char *) (list + HEADER_LENGTH));
SvLEN_set(invlist, 0); /* Means we own the contents, and the system
shouldn't touch it */
*(get_invlist_offset_addr(invlist)) = offset;
/* The 'length' passed to us is the physical number of elements in the
* inversion list. But if there is an offset the logical number is one
* less than that */
invlist_set_len(invlist, length - offset, offset);
invlist_set_previous_index(invlist, 0);
/* Initialize the iteration pointer. */
invlist_iterfinish(invlist);
SvREADONLY_on(invlist);
return invlist;
}
STATIC void
S_invlist_extend(pTHX_ SV* const invlist, const UV new_max)
{
/* Grow the maximum size of an inversion list */
PERL_ARGS_ASSERT_INVLIST_EXTEND;
assert(is_invlist(invlist));
/* Add one to account for the zero element at the beginning which may not
* be counted by the calling parameters */
SvGROW((SV *)invlist, TO_INTERNAL_SIZE(new_max + 1));
}
STATIC void
S__append_range_to_invlist(pTHX_ SV* const invlist,
const UV start, const UV end)
{
/* Subject to change or removal. Append the range from 'start' to 'end' at
* the end of the inversion list. The range must be above any existing
* ones. */
UV* array;
UV max = invlist_max(invlist);
UV len = _invlist_len(invlist);
bool offset;
PERL_ARGS_ASSERT__APPEND_RANGE_TO_INVLIST;
if (len == 0) { /* Empty lists must be initialized */
offset = start != 0;
array = _invlist_array_init(invlist, ! offset);
}
else {
/* Here, the existing list is non-empty. The current max entry in the
* list is generally the first value not in the set, except when the
* set extends to the end of permissible values, in which case it is
* the first entry in that final set, and so this call is an attempt to
* append out-of-order */
UV final_element = len - 1;
array = invlist_array(invlist);
if ( array[final_element] > start
|| ELEMENT_RANGE_MATCHES_INVLIST(final_element))
{
Perl_croak(aTHX_ "panic: attempting to append to an inversion list, but wasn't at the end of the list, final=%" UVuf ", start=%" UVuf ", match=%c",
array[final_element], start,
ELEMENT_RANGE_MATCHES_INVLIST(final_element) ? 't' : 'f');
}
/* Here, it is a legal append. If the new range begins 1 above the end
* of the range below it, it is extending the range below it, so the
* new first value not in the set is one greater than the newly
* extended range. */
offset = *get_invlist_offset_addr(invlist);
if (array[final_element] == start) {
if (end != UV_MAX) {
array[final_element] = end + 1;
}
else {
/* But if the end is the maximum representable on the machine,
* assume that infinity was actually what was meant. Just let
* the range that this would extend to have no end */
invlist_set_len(invlist, len - 1, offset);
}
return;
}
}
/* Here the new range doesn't extend any existing set. Add it */
len += 2; /* Includes an element each for the start and end of range */
/* If wll overflow the existing space, extend, which may cause the array to
* be moved */
if (max < len) {
invlist_extend(invlist, len);
/* Have to set len here to avoid assert failure in invlist_array() */
invlist_set_len(invlist, len, offset);
array = invlist_array(invlist);
}
else {
invlist_set_len(invlist, len, offset);
}
/* The next item on the list starts the range, the one after that is
* one past the new range. */
array[len - 2] = start;
if (end != UV_MAX) {
array[len - 1] = end + 1;
}
else {
/* But if the end is the maximum representable on the machine, just let
* the range have no end */
invlist_set_len(invlist, len - 1, offset);
}
}
SSize_t
Perl__invlist_search(SV* const invlist, const UV cp)
{
/* Searches the inversion list for the entry that contains the input code
* point <cp>. If <cp> is not in the list, -1 is returned. Otherwise, the
* return value is the index into the list's array of the range that
* contains <cp>, that is, 'i' such that
* array[i] <= cp < array[i+1]
*/
IV low = 0;
IV mid;
IV high = _invlist_len(invlist);
const IV highest_element = high - 1;
const UV* array;
PERL_ARGS_ASSERT__INVLIST_SEARCH;
/* If list is empty, return failure. */
if (high == 0) {
return -1;
}
/* (We can't get the array unless we know the list is non-empty) */
array = invlist_array(invlist);
mid = invlist_previous_index(invlist);
assert(mid >=0);
if (mid > highest_element) {
mid = highest_element;
}
/* <mid> contains the cache of the result of the previous call to this
* function (0 the first time). See if this call is for the same result,
* or if it is for mid-1. This is under the theory that calls to this
* function will often be for related code points that are near each other.
* And benchmarks show that caching gives better results. We also test
* here if the code point is within the bounds of the list. These tests
* replace others that would have had to be made anyway to make sure that
* the array bounds were not exceeded, and these give us extra information
* at the same time */
if (cp >= array[mid]) {
if (cp >= array[highest_element]) {
return highest_element;
}
/* Here, array[mid] <= cp < array[highest_element]. This means that
* the final element is not the answer, so can exclude it; it also
* means that <mid> is not the final element, so can refer to 'mid + 1'
* safely */
if (cp < array[mid + 1]) {
return mid;
}
high--;
low = mid + 1;
}
else { /* cp < aray[mid] */
if (cp < array[0]) { /* Fail if outside the array */
return -1;
}
high = mid;
if (cp >= array[mid - 1]) {
goto found_entry;
}
}
/* Binary search. What we are looking for is <i> such that
* array[i] <= cp < array[i+1]
* The loop below converges on the i+1. Note that there may not be an
* (i+1)th element in the array, and things work nonetheless */
while (low < high) {
mid = (low + high) / 2;
assert(mid <= highest_element);
if (array[mid] <= cp) { /* cp >= array[mid] */
low = mid + 1;
/* We could do this extra test to exit the loop early.
if (cp < array[low]) {
return mid;
}
*/
}
else { /* cp < array[mid] */
high = mid;
}
}
found_entry:
high--;
invlist_set_previous_index(invlist, high);
return high;
}
void
Perl__invlist_union_maybe_complement_2nd(pTHX_ SV* const a, SV* const b,
const bool complement_b, SV** output)
{
/* Take the union of two inversion lists and point '*output' to it. On
* input, '*output' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly
* even 'a' or 'b'). If to an inversion list, the contents of the original
* list will be replaced by the union. The first list, 'a', may be
* NULL, in which case a copy of the second list is placed in '*output'.
* If 'complement_b' is TRUE, the union is taken of the complement
* (inversion) of 'b' instead of b itself.
*
* The basis for this comes from "Unicode Demystified" Chapter 13 by
* Richard Gillam, published by Addison-Wesley, and explained at some
* length there. The preface says to incorporate its examples into your
* code at your own risk.
*
* The algorithm is like a merge sort. */
const UV* array_a; /* a's array */
const UV* array_b;
UV len_a; /* length of a's array */
UV len_b;
SV* u; /* the resulting union */
UV* array_u;
UV len_u = 0;
UV i_a = 0; /* current index into a's array */
UV i_b = 0;
UV i_u = 0;
/* running count, as explained in the algorithm source book; items are
* stopped accumulating and are output when the count changes to/from 0.
* The count is incremented when we start a range that's in an input's set,
* and decremented when we start a range that's not in a set. So this
* variable can be 0, 1, or 2. When it is 0 neither input is in their set,
* and hence nothing goes into the union; 1, just one of the inputs is in
* its set (and its current range gets added to the union); and 2 when both
* inputs are in their sets. */
UV count = 0;
PERL_ARGS_ASSERT__INVLIST_UNION_MAYBE_COMPLEMENT_2ND;
assert(a != b);
assert(*output == NULL || is_invlist(*output));
len_b = _invlist_len(b);
if (len_b == 0) {
/* Here, 'b' is empty, hence it's complement is all possible code
* points. So if the union includes the complement of 'b', it includes
* everything, and we need not even look at 'a'. It's easiest to
* create a new inversion list that matches everything. */
if (complement_b) {
SV* everything = _add_range_to_invlist(NULL, 0, UV_MAX);
if (*output == NULL) { /* If the output didn't exist, just point it
at the new list */
*output = everything;
}
else { /* Otherwise, replace its contents with the new list */
invlist_replace_list_destroys_src(*output, everything);
SvREFCNT_dec_NN(everything);
}
return;
}
/* Here, we don't want the complement of 'b', and since 'b' is empty,
* the union will come entirely from 'a'. If 'a' is NULL or empty, the
* output will be empty */
if (a == NULL || _invlist_len(a) == 0) {
if (*output == NULL) {
*output = _new_invlist(0);
}
else {
invlist_clear(*output);
}
return;
}
/* Here, 'a' is not empty, but 'b' is, so 'a' entirely determines the
* union. We can just return a copy of 'a' if '*output' doesn't point
* to an existing list */
if (*output == NULL) {
*output = invlist_clone(a, NULL);
return;
}
/* If the output is to overwrite 'a', we have a no-op, as it's
* already in 'a' */
if (*output == a) {
return;
}
/* Here, '*output' is to be overwritten by 'a' */
u = invlist_clone(a, NULL);
invlist_replace_list_destroys_src(*output, u);
SvREFCNT_dec_NN(u);
return;
}
/* Here 'b' is not empty. See about 'a' */
if (a == NULL || ((len_a = _invlist_len(a)) == 0)) {
/* Here, 'a' is empty (and b is not). That means the union will come
* entirely from 'b'. If '*output' is NULL, we can directly return a
* clone of 'b'. Otherwise, we replace the contents of '*output' with
* the clone */
SV ** dest = (*output == NULL) ? output : &u;
*dest = invlist_clone(b, NULL);
if (complement_b) {
_invlist_invert(*dest);
}
if (dest == &u) {
invlist_replace_list_destroys_src(*output, u);
SvREFCNT_dec_NN(u);
}
return;
}
/* Here both lists exist and are non-empty */
array_a = invlist_array(a);
array_b = invlist_array(b);
/* If are to take the union of 'a' with the complement of b, set it
* up so are looking at b's complement. */
if (complement_b) {
/* To complement, we invert: if the first element is 0, remove it. To
* do this, we just pretend the array starts one later */
if (array_b[0] == 0) {
array_b++;
len_b--;
}
else {
/* But if the first element is not zero, we pretend the list starts
* at the 0 that is always stored immediately before the array. */
array_b--;
len_b++;
}
}
/* Size the union for the worst case: that the sets are completely
* disjoint */
u = _new_invlist(len_a + len_b);
/* Will contain U+0000 if either component does */
array_u = _invlist_array_init(u, ( len_a > 0 && array_a[0] == 0)
|| (len_b > 0 && array_b[0] == 0));
/* Go through each input list item by item, stopping when have exhausted
* one of them */
while (i_a < len_a && i_b < len_b) {
UV cp; /* The element to potentially add to the union's array */
bool cp_in_set; /* is it in the the input list's set or not */
/* We need to take one or the other of the two inputs for the union.
* Since we are merging two sorted lists, we take the smaller of the
* next items. In case of a tie, we take first the one that is in its
* set. If we first took the one not in its set, it would decrement
* the count, possibly to 0 which would cause it to be output as ending
* the range, and the next time through we would take the same number,
* and output it again as beginning the next range. By doing it the
* opposite way, there is no possibility that the count will be
* momentarily decremented to 0, and thus the two adjoining ranges will
* be seamlessly merged. (In a tie and both are in the set or both not
* in the set, it doesn't matter which we take first.) */
if ( array_a[i_a] < array_b[i_b]
|| ( array_a[i_a] == array_b[i_b]
&& ELEMENT_RANGE_MATCHES_INVLIST(i_a)))
{
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a);
cp = array_a[i_a++];
}
else {
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b);
cp = array_b[i_b++];
}
/* Here, have chosen which of the two inputs to look at. Only output
* if the running count changes to/from 0, which marks the
* beginning/end of a range that's in the set */
if (cp_in_set) {
if (count == 0) {
array_u[i_u++] = cp;
}
count++;
}
else {
count--;
if (count == 0) {
array_u[i_u++] = cp;
}
}
}
/* The loop above increments the index into exactly one of the input lists
* each iteration, and ends when either index gets to its list end. That
* means the other index is lower than its end, and so something is
* remaining in that one. We decrement 'count', as explained below, if
* that list is in its set. (i_a and i_b each currently index the element
* beyond the one we care about.) */
if ( (i_a != len_a && PREV_RANGE_MATCHES_INVLIST(i_a))
|| (i_b != len_b && PREV_RANGE_MATCHES_INVLIST(i_b)))
{
count--;
}
/* Above we decremented 'count' if the list that had unexamined elements in
* it was in its set. This has made it so that 'count' being non-zero
* means there isn't anything left to output; and 'count' equal to 0 means
* that what is left to output is precisely that which is left in the
* non-exhausted input list.
*
* To see why, note first that the exhausted input obviously has nothing
* left to add to the union. If it was in its set at its end, that means
* the set extends from here to the platform's infinity, and hence so does
* the union and the non-exhausted set is irrelevant. The exhausted set
* also contributed 1 to 'count'. If 'count' was 2, it got decremented to
* 1, but if it was 1, the non-exhausted set wasn't in its set, and so
* 'count' remains at 1. This is consistent with the decremented 'count'
* != 0 meaning there's nothing left to add to the union.
*
* But if the exhausted input wasn't in its set, it contributed 0 to
* 'count', and the rest of the union will be whatever the other input is.
* If 'count' was 0, neither list was in its set, and 'count' remains 0;
* otherwise it gets decremented to 0. This is consistent with 'count'
* == 0 meaning the remainder of the union is whatever is left in the
* non-exhausted list. */
if (count != 0) {
len_u = i_u;
}
else {
IV copy_count = len_a - i_a;
if (copy_count > 0) { /* The non-exhausted input is 'a' */
Copy(array_a + i_a, array_u + i_u, copy_count, UV);
}
else { /* The non-exhausted input is b */
copy_count = len_b - i_b;
Copy(array_b + i_b, array_u + i_u, copy_count, UV);
}
len_u = i_u + copy_count;
}
/* Set the result to the final length, which can change the pointer to
* array_u, so re-find it. (Note that it is unlikely that this will
* change, as we are shrinking the space, not enlarging it) */
if (len_u != _invlist_len(u)) {
invlist_set_len(u, len_u, *get_invlist_offset_addr(u));
invlist_trim(u);
array_u = invlist_array(u);
}
if (*output == NULL) { /* Simply return the new inversion list */
*output = u;
}
else {
/* Otherwise, overwrite the inversion list that was in '*output'. We
* could instead free '*output', and then set it to 'u', but experience
* has shown [perl #127392] that if the input is a mortal, we can get a
* huge build-up of these during regex compilation before they get
* freed. */
invlist_replace_list_destroys_src(*output, u);
SvREFCNT_dec_NN(u);
}
return;
}
void
Perl__invlist_intersection_maybe_complement_2nd(pTHX_ SV* const a, SV* const b,
const bool complement_b, SV** i)
{
/* Take the intersection of two inversion lists and point '*i' to it. On
* input, '*i' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly
* even 'a' or 'b'). If to an inversion list, the contents of the original
* list will be replaced by the intersection. The first list, 'a', may be
* NULL, in which case '*i' will be an empty list. If 'complement_b' is
* TRUE, the result will be the intersection of 'a' and the complement (or
* inversion) of 'b' instead of 'b' directly.
*
* The basis for this comes from "Unicode Demystified" Chapter 13 by
* Richard Gillam, published by Addison-Wesley, and explained at some
* length there. The preface says to incorporate its examples into your
* code at your own risk. In fact, it had bugs
*
* The algorithm is like a merge sort, and is essentially the same as the
* union above
*/
const UV* array_a; /* a's array */
const UV* array_b;
UV len_a; /* length of a's array */
UV len_b;
SV* r; /* the resulting intersection */
UV* array_r;
UV len_r = 0;
UV i_a = 0; /* current index into a's array */
UV i_b = 0;
UV i_r = 0;
/* running count of how many of the two inputs are postitioned at ranges
* that are in their sets. As explained in the algorithm source book,
* items are stopped accumulating and are output when the count changes
* to/from 2. The count is incremented when we start a range that's in an
* input's set, and decremented when we start a range that's not in a set.
* Only when it is 2 are we in the intersection. */
UV count = 0;
PERL_ARGS_ASSERT__INVLIST_INTERSECTION_MAYBE_COMPLEMENT_2ND;
assert(a != b);
assert(*i == NULL || is_invlist(*i));
/* Special case if either one is empty */
len_a = (a == NULL) ? 0 : _invlist_len(a);
if ((len_a == 0) || ((len_b = _invlist_len(b)) == 0)) {
if (len_a != 0 && complement_b) {
/* Here, 'a' is not empty, therefore from the enclosing 'if', 'b'
* must be empty. Here, also we are using 'b's complement, which
* hence must be every possible code point. Thus the intersection
* is simply 'a'. */
if (*i == a) { /* No-op */
return;
}
if (*i == NULL) {
*i = invlist_clone(a, NULL);
return;
}
r = invlist_clone(a, NULL);
invlist_replace_list_destroys_src(*i, r);
SvREFCNT_dec_NN(r);
return;
}
/* Here, 'a' or 'b' is empty and not using the complement of 'b'. The
* intersection must be empty */
if (*i == NULL) {
*i = _new_invlist(0);
return;
}
invlist_clear(*i);
return;
}
/* Here both lists exist and are non-empty */
array_a = invlist_array(a);
array_b = invlist_array(b);
/* If are to take the intersection of 'a' with the complement of b, set it
* up so are looking at b's complement. */
if (complement_b) {
/* To complement, we invert: if the first element is 0, remove it. To
* do this, we just pretend the array starts one later */
if (array_b[0] == 0) {
array_b++;
len_b--;
}
else {
/* But if the first element is not zero, we pretend the list starts
* at the 0 that is always stored immediately before the array. */
array_b--;
len_b++;
}
}
/* Size the intersection for the worst case: that the intersection ends up
* fragmenting everything to be completely disjoint */
r= _new_invlist(len_a + len_b);
/* Will contain U+0000 iff both components do */
array_r = _invlist_array_init(r, len_a > 0 && array_a[0] == 0
&& len_b > 0 && array_b[0] == 0);
/* Go through each list item by item, stopping when have exhausted one of
* them */
while (i_a < len_a && i_b < len_b) {
UV cp; /* The element to potentially add to the intersection's
array */
bool cp_in_set; /* Is it in the input list's set or not */
/* We need to take one or the other of the two inputs for the
* intersection. Since we are merging two sorted lists, we take the
* smaller of the next items. In case of a tie, we take first the one
* that is not in its set (a difference from the union algorithm). If
* we first took the one in its set, it would increment the count,
* possibly to 2 which would cause it to be output as starting a range
* in the intersection, and the next time through we would take that
* same number, and output it again as ending the set. By doing the
* opposite of this, there is no possibility that the count will be
* momentarily incremented to 2. (In a tie and both are in the set or
* both not in the set, it doesn't matter which we take first.) */
if ( array_a[i_a] < array_b[i_b]
|| ( array_a[i_a] == array_b[i_b]
&& ! ELEMENT_RANGE_MATCHES_INVLIST(i_a)))
{
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a);
cp = array_a[i_a++];
}
else {
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b);
cp= array_b[i_b++];
}
/* Here, have chosen which of the two inputs to look at. Only output
* if the running count changes to/from 2, which marks the
* beginning/end of a range that's in the intersection */
if (cp_in_set) {
count++;
if (count == 2) {
array_r[i_r++] = cp;
}
}
else {
if (count == 2) {
array_r[i_r++] = cp;
}
count--;
}
}
/* The loop above increments the index into exactly one of the input lists
* each iteration, and ends when either index gets to its list end. That
* means the other index is lower than its end, and so something is
* remaining in that one. We increment 'count', as explained below, if the
* exhausted list was in its set. (i_a and i_b each currently index the
* element beyond the one we care about.) */
if ( (i_a == len_a && PREV_RANGE_MATCHES_INVLIST(i_a))
|| (i_b == len_b && PREV_RANGE_MATCHES_INVLIST(i_b)))
{
count++;
}
/* Above we incremented 'count' if the exhausted list was in its set. This
* has made it so that 'count' being below 2 means there is nothing left to
* output; otheriwse what's left to add to the intersection is precisely
* that which is left in the non-exhausted input list.
*
* To see why, note first that the exhausted input obviously has nothing
* left to affect the intersection. If it was in its set at its end, that
* means the set extends from here to the platform's infinity, and hence
* anything in the non-exhausted's list will be in the intersection, and
* anything not in it won't be. Hence, the rest of the intersection is
* precisely what's in the non-exhausted list The exhausted set also
* contributed 1 to 'count', meaning 'count' was at least 1. Incrementing
* it means 'count' is now at least 2. This is consistent with the
* incremented 'count' being >= 2 means to add the non-exhausted list to
* the intersection.
*
* But if the exhausted input wasn't in its set, it contributed 0 to
* 'count', and the intersection can't include anything further; the
* non-exhausted set is irrelevant. 'count' was at most 1, and doesn't get
* incremented. This is consistent with 'count' being < 2 meaning nothing
* further to add to the intersection. */
if (count < 2) { /* Nothing left to put in the intersection. */
len_r = i_r;
}
else { /* copy the non-exhausted list, unchanged. */
IV copy_count = len_a - i_a;
if (copy_count > 0) { /* a is the one with stuff left */
Copy(array_a + i_a, array_r + i_r, copy_count, UV);
}
else { /* b is the one with stuff left */
copy_count = len_b - i_b;
Copy(array_b + i_b, array_r + i_r, copy_count, UV);
}
len_r = i_r + copy_count;
}
/* Set the result to the final length, which can change the pointer to
* array_r, so re-find it. (Note that it is unlikely that this will
* change, as we are shrinking the space, not enlarging it) */
if (len_r != _invlist_len(r)) {
invlist_set_len(r, len_r, *get_invlist_offset_addr(r));
invlist_trim(r);
array_r = invlist_array(r);
}
if (*i == NULL) { /* Simply return the calculated intersection */
*i = r;
}
else { /* Otherwise, replace the existing inversion list in '*i'. We could
instead free '*i', and then set it to 'r', but experience has
shown [perl #127392] that if the input is a mortal, we can get a
huge build-up of these during regex compilation before they get
freed. */
if (len_r) {
invlist_replace_list_destroys_src(*i, r);
}
else {
invlist_clear(*i);
}
SvREFCNT_dec_NN(r);
}
return;
}
SV*
Perl__add_range_to_invlist(pTHX_ SV* invlist, UV start, UV end)
{
/* Add the range from 'start' to 'end' inclusive to the inversion list's
* set. A pointer to the inversion list is returned. This may actually be
* a new list, in which case the passed in one has been destroyed. The
* passed-in inversion list can be NULL, in which case a new one is created
* with just the one range in it. The new list is not necessarily
* NUL-terminated. Space is not freed if the inversion list shrinks as a
* result of this function. The gain would not be large, and in many
* cases, this is called multiple times on a single inversion list, so
* anything freed may almost immediately be needed again.
*
* This used to mostly call the 'union' routine, but that is much more
* heavyweight than really needed for a single range addition */
UV* array; /* The array implementing the inversion list */
UV len; /* How many elements in 'array' */
SSize_t i_s; /* index into the invlist array where 'start'
should go */
SSize_t i_e = 0; /* And the index where 'end' should go */
UV cur_highest; /* The highest code point in the inversion list
upon entry to this function */
/* This range becomes the whole inversion list if none already existed */
if (invlist == NULL) {
invlist = _new_invlist(2);
_append_range_to_invlist(invlist, start, end);
return invlist;
}
/* Likewise, if the inversion list is currently empty */
len = _invlist_len(invlist);
if (len == 0) {
_append_range_to_invlist(invlist, start, end);
return invlist;
}
/* Starting here, we have to know the internals of the list */
array = invlist_array(invlist);
/* If the new range ends higher than the current highest ... */
cur_highest = invlist_highest(invlist);
if (end > cur_highest) {
/* If the whole range is higher, we can just append it */
if (start > cur_highest) {
_append_range_to_invlist(invlist, start, end);
return invlist;
}
/* Otherwise, add the portion that is higher ... */
_append_range_to_invlist(invlist, cur_highest + 1, end);
/* ... and continue on below to handle the rest. As a result of the
* above append, we know that the index of the end of the range is the
* final even numbered one of the array. Recall that the final element
* always starts a range that extends to infinity. If that range is in
* the set (meaning the set goes from here to infinity), it will be an
* even index, but if it isn't in the set, it's odd, and the final
* range in the set is one less, which is even. */
if (end == UV_MAX) {
i_e = len;
}
else {
i_e = len - 2;
}
}
/* We have dealt with appending, now see about prepending. If the new
* range starts lower than the current lowest ... */
if (start < array[0]) {
/* Adding something which has 0 in it is somewhat tricky, and uncommon.
* Let the union code handle it, rather than having to know the
* trickiness in two code places. */
if (UNLIKELY(start == 0)) {
SV* range_invlist;
range_invlist = _new_invlist(2);
_append_range_to_invlist(range_invlist, start, end);
_invlist_union(invlist, range_invlist, &invlist);
SvREFCNT_dec_NN(range_invlist);
return invlist;
}
/* If the whole new range comes before the first entry, and doesn't
* extend it, we have to insert it as an additional range */
if (end < array[0] - 1) {
i_s = i_e = -1;
goto splice_in_new_range;
}
/* Here the new range adjoins the existing first range, extending it
* downwards. */
array[0] = start;
/* And continue on below to handle the rest. We know that the index of
* the beginning of the range is the first one of the array */
i_s = 0;
}
else { /* Not prepending any part of the new range to the existing list.
* Find where in the list it should go. This finds i_s, such that:
* invlist[i_s] <= start < array[i_s+1]
*/
i_s = _invlist_search(invlist, start);
}
/* At this point, any extending before the beginning of the inversion list
* and/or after the end has been done. This has made it so that, in the
* code below, each endpoint of the new range is either in a range that is
* in the set, or is in a gap between two ranges that are. This means we
* don't have to worry about exceeding the array bounds.
*
* Find where in the list the new range ends (but we can skip this if we
* have already determined what it is, or if it will be the same as i_s,
* which we already have computed) */
if (i_e == 0) {
i_e = (start == end)
? i_s
: _invlist_search(invlist, end);
}
/* Here generally invlist[i_e] <= end < array[i_e+1]. But if invlist[i_e]
* is a range that goes to infinity there is no element at invlist[i_e+1],
* so only the first relation holds. */
if ( ! ELEMENT_RANGE_MATCHES_INVLIST(i_s)) {
/* Here, the ranges on either side of the beginning of the new range
* are in the set, and this range starts in the gap between them.
*
* The new range extends the range above it downwards if the new range
* ends at or above that range's start */
const bool extends_the_range_above = ( end == UV_MAX
|| end + 1 >= array[i_s+1]);
/* The new range extends the range below it upwards if it begins just
* after where that range ends */
if (start == array[i_s]) {
/* If the new range fills the entire gap between the other ranges,
* they will get merged together. Other ranges may also get
* merged, depending on how many of them the new range spans. In
* the general case, we do the merge later, just once, after we
* figure out how many to merge. But in the case where the new
* range exactly spans just this one gap (possibly extending into
* the one above), we do the merge here, and an early exit. This
* is done here to avoid having to special case later. */
if (i_e - i_s <= 1) {
/* If i_e - i_s == 1, it means that the new range terminates
* within the range above, and hence 'extends_the_range_above'
* must be true. (If the range above it extends to infinity,
* 'i_s+2' will be above the array's limit, but 'len-i_s-2'
* will be 0, so no harm done.) */
if (extends_the_range_above) {
Move(array + i_s + 2, array + i_s, len - i_s - 2, UV);
invlist_set_len(invlist,
len - 2,
*(get_invlist_offset_addr(invlist)));
return invlist;
}
/* Here, i_e must == i_s. We keep them in sync, as they apply
* to the same range, and below we are about to decrement i_s
* */
i_e--;
}
/* Here, the new range is adjacent to the one below. (It may also
* span beyond the range above, but that will get resolved later.)
* Extend the range below to include this one. */
array[i_s] = (end == UV_MAX) ? UV_MAX : end + 1;
i_s--;
start = array[i_s];
}
else if (extends_the_range_above) {
/* Here the new range only extends the range above it, but not the
* one below. It merges with the one above. Again, we keep i_e
* and i_s in sync if they point to the same range */
if (i_e == i_s) {
i_e++;
}
i_s++;
array[i_s] = start;
}
}
/* Here, we've dealt with the new range start extending any adjoining
* existing ranges.
*
* If the new range extends to infinity, it is now the final one,
* regardless of what was there before */
if (UNLIKELY(end == UV_MAX)) {
invlist_set_len(invlist, i_s + 1, *(get_invlist_offset_addr(invlist)));
return invlist;
}
/* If i_e started as == i_s, it has also been dealt with,
* and been updated to the new i_s, which will fail the following if */
if (! ELEMENT_RANGE_MATCHES_INVLIST(i_e)) {
/* Here, the ranges on either side of the end of the new range are in
* the set, and this range ends in the gap between them.
*
* If this range is adjacent to (hence extends) the range above it, it
* becomes part of that range; likewise if it extends the range below,
* it becomes part of that range */
if (end + 1 == array[i_e+1]) {
i_e++;
array[i_e] = start;
}
else if (start <= array[i_e]) {
array[i_e] = end + 1;
i_e--;
}
}
if (i_s == i_e) {
/* If the range fits entirely in an existing range (as possibly already
* extended above), it doesn't add anything new */
if (ELEMENT_RANGE_MATCHES_INVLIST(i_s)) {
return invlist;
}
/* Here, no part of the range is in the list. Must add it. It will
* occupy 2 more slots */
splice_in_new_range:
invlist_extend(invlist, len + 2);
array = invlist_array(invlist);
/* Move the rest of the array down two slots. Don't include any
* trailing NUL */
Move(array + i_e + 1, array + i_e + 3, len - i_e - 1, UV);
/* Do the actual splice */
array[i_e+1] = start;
array[i_e+2] = end + 1;
invlist_set_len(invlist, len + 2, *(get_invlist_offset_addr(invlist)));
return invlist;
}
/* Here the new range crossed the boundaries of a pre-existing range. The
* code above has adjusted things so that both ends are in ranges that are
* in the set. This means everything in between must also be in the set.
* Just squash things together */
Move(array + i_e + 1, array + i_s + 1, len - i_e - 1, UV);
invlist_set_len(invlist,
len - i_e + i_s,
*(get_invlist_offset_addr(invlist)));
return invlist;
}
SV*
Perl__setup_canned_invlist(pTHX_ const STRLEN size, const UV element0,
UV** other_elements_ptr)
{
/* Create and return an inversion list whose contents are to be populated
* by the caller. The caller gives the number of elements (in 'size') and
* the very first element ('element0'). This function will set
* '*other_elements_ptr' to an array of UVs, where the remaining elements
* are to be placed.
*
* Obviously there is some trust involved that the caller will properly
* fill in the other elements of the array.
*
* (The first element needs to be passed in, as the underlying code does
* things differently depending on whether it is zero or non-zero) */
SV* invlist = _new_invlist(size);
bool offset;
PERL_ARGS_ASSERT__SETUP_CANNED_INVLIST;
invlist = add_cp_to_invlist(invlist, element0);
offset = *get_invlist_offset_addr(invlist);
invlist_set_len(invlist, size, offset);
*other_elements_ptr = invlist_array(invlist) + 1;
return invlist;
}
#endif
PERL_STATIC_INLINE SV*
S_add_cp_to_invlist(pTHX_ SV* invlist, const UV cp) {
return _add_range_to_invlist(invlist, cp, cp);
}
#ifndef PERL_IN_XSUB_RE
void
Perl__invlist_invert(pTHX_ SV* const invlist)
{
/* Complement the input inversion list. This adds a 0 if the list didn't
* have a zero; removes it otherwise. As described above, the data
* structure is set up so that this is very efficient */
PERL_ARGS_ASSERT__INVLIST_INVERT;
assert(! invlist_is_iterating(invlist));
/* The inverse of matching nothing is matching everything */
if (_invlist_len(invlist) == 0) {
_append_range_to_invlist(invlist, 0, UV_MAX);
return;
}
*get_invlist_offset_addr(invlist) = ! *get_invlist_offset_addr(invlist);
}
SV*
Perl_invlist_clone(pTHX_ SV* const invlist, SV* new_invlist)
{
/* Return a new inversion list that is a copy of the input one, which is
* unchanged. The new list will not be mortal even if the old one was. */
const STRLEN nominal_length = _invlist_len(invlist);
const STRLEN physical_length = SvCUR(invlist);
const bool offset = *(get_invlist_offset_addr(invlist));
PERL_ARGS_ASSERT_INVLIST_CLONE;
if (new_invlist == NULL) {
new_invlist = _new_invlist(nominal_length);
}
else {
sv_upgrade(new_invlist, SVt_INVLIST);
initialize_invlist_guts(new_invlist, nominal_length);
}
*(get_invlist_offset_addr(new_invlist)) = offset;
invlist_set_len(new_invlist, nominal_length, offset);
Copy(SvPVX(invlist), SvPVX(new_invlist), physical_length, char);
return new_invlist;
}
#endif
PERL_STATIC_INLINE STRLEN*
S_get_invlist_iter_addr(SV* invlist)
{
/* Return the address of the UV that contains the current iteration
* position */
PERL_ARGS_ASSERT_GET_INVLIST_ITER_ADDR;
assert(is_invlist(invlist));
return &(((XINVLIST*) SvANY(invlist))->iterator);
}
PERL_STATIC_INLINE void
S_invlist_iterinit(SV* invlist) /* Initialize iterator for invlist */
{
PERL_ARGS_ASSERT_INVLIST_ITERINIT;
*get_invlist_iter_addr(invlist) = 0;
}
PERL_STATIC_INLINE void
S_invlist_iterfinish(SV* invlist)
{
/* Terminate iterator for invlist. This is to catch development errors.
* Any iteration that is interrupted before completed should call this
* function. Functions that add code points anywhere else but to the end
* of an inversion list assert that they are not in the middle of an
* iteration. If they were, the addition would make the iteration
* problematical: if the iteration hadn't reached the place where things
* were being added, it would be ok */
PERL_ARGS_ASSERT_INVLIST_ITERFINISH;
*get_invlist_iter_addr(invlist) = (STRLEN) UV_MAX;
}
STATIC bool
S_invlist_iternext(SV* invlist, UV* start, UV* end)
{
/* An C<invlist_iterinit> call on <invlist> must be used to set this up.
* This call sets in <*start> and <*end>, the next range in <invlist>.
* Returns <TRUE> if successful and the next call will return the next
* range; <FALSE> if was already at the end of the list. If the latter,
* <*start> and <*end> are unchanged, and the next call to this function
* will start over at the beginning of the list */
STRLEN* pos = get_invlist_iter_addr(invlist);
UV len = _invlist_len(invlist);
UV *array;
PERL_ARGS_ASSERT_INVLIST_ITERNEXT;
if (*pos >= len) {
*pos = (STRLEN) UV_MAX; /* Force iterinit() to be required next time */
return FALSE;
}
array = invlist_array(invlist);
*start = array[(*pos)++];
if (*pos >= len) {
*end = UV_MAX;
}
else {
*end = array[(*pos)++] - 1;
}
return TRUE;
}
PERL_STATIC_INLINE UV
S_invlist_highest(SV* const invlist)
{
/* Returns the highest code point that matches an inversion list. This API
* has an ambiguity, as it returns 0 under either the highest is actually
* 0, or if the list is empty. If this distinction matters to you, check
* for emptiness before calling this function */
UV len = _invlist_len(invlist);
UV *array;
PERL_ARGS_ASSERT_INVLIST_HIGHEST;
if (len == 0) {
return 0;
}
array = invlist_array(invlist);
/* The last element in the array in the inversion list always starts a
* range that goes to infinity. That range may be for code points that are
* matched in the inversion list, or it may be for ones that aren't
* matched. In the latter case, the highest code point in the set is one
* less than the beginning of this range; otherwise it is the final element
* of this range: infinity */
return (ELEMENT_RANGE_MATCHES_INVLIST(len - 1))
? UV_MAX
: array[len - 1] - 1;
}
STATIC SV *
S_invlist_contents(pTHX_ SV* const invlist, const bool traditional_style)
{
/* Get the contents of an inversion list into a string SV so that they can
* be printed out. If 'traditional_style' is TRUE, it uses the format
* traditionally done for debug tracing; otherwise it uses a format
* suitable for just copying to the output, with blanks between ranges and
* a dash between range components */
UV start, end;
SV* output;
const char intra_range_delimiter = (traditional_style ? '\t' : '-');
const char inter_range_delimiter = (traditional_style ? '\n' : ' ');
if (traditional_style) {
output = newSVpvs("\n");
}
else {
output = newSVpvs("");
}
PERL_ARGS_ASSERT_INVLIST_CONTENTS;
assert(! invlist_is_iterating(invlist));
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
if (end == UV_MAX) {
Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%cINFTY%c",
start, intra_range_delimiter,
inter_range_delimiter);
}
else if (end != start) {
Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c%04" UVXf "%c",
start,
intra_range_delimiter,
end, inter_range_delimiter);
}
else {
Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c",
start, inter_range_delimiter);
}
}
if (SvCUR(output) && ! traditional_style) {/* Get rid of trailing blank */
SvCUR_set(output, SvCUR(output) - 1);
}
return output;
}
#ifndef PERL_IN_XSUB_RE
void
Perl__invlist_dump(pTHX_ PerlIO *file, I32 level,
const char * const indent, SV* const invlist)
{
/* Designed to be called only by do_sv_dump(). Dumps out the ranges of the
* inversion list 'invlist' to 'file' at 'level' Each line is prefixed by
* the string 'indent'. The output looks like this:
[0] 0x000A .. 0x000D
[2] 0x0085
[4] 0x2028 .. 0x2029
[6] 0x3104 .. INFTY
* This means that the first range of code points matched by the list are
* 0xA through 0xD; the second range contains only the single code point
* 0x85, etc. An inversion list is an array of UVs. Two array elements
* are used to define each range (except if the final range extends to
* infinity, only a single element is needed). The array index of the
* first element for the corresponding range is given in brackets. */
UV start, end;
STRLEN count = 0;
PERL_ARGS_ASSERT__INVLIST_DUMP;
if (invlist_is_iterating(invlist)) {
Perl_dump_indent(aTHX_ level, file,
"%sCan't dump inversion list because is in middle of iterating\n",
indent);
return;
}
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
if (end == UV_MAX) {
Perl_dump_indent(aTHX_ level, file,
"%s[%" UVuf "] 0x%04" UVXf " .. INFTY\n",
indent, (UV)count, start);
}
else if (end != start) {
Perl_dump_indent(aTHX_ level, file,
"%s[%" UVuf "] 0x%04" UVXf " .. 0x%04" UVXf "\n",
indent, (UV)count, start, end);
}
else {
Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf "\n",
indent, (UV)count, start);
}
count += 2;
}
}
#endif
#if defined(PERL_ARGS_ASSERT__INVLISTEQ) && !defined(PERL_IN_XSUB_RE)
bool
Perl__invlistEQ(pTHX_ SV* const a, SV* const b, const bool complement_b)
{
/* Return a boolean as to if the two passed in inversion lists are
* identical. The final argument, if TRUE, says to take the complement of
* the second inversion list before doing the comparison */
const UV len_a = _invlist_len(a);
UV len_b = _invlist_len(b);
const UV* array_a = NULL;
const UV* array_b = NULL;
PERL_ARGS_ASSERT__INVLISTEQ;
/* This code avoids accessing the arrays unless it knows the length is
* non-zero */
if (len_a == 0) {
if (len_b == 0) {
return ! complement_b;
}
}
else {
array_a = invlist_array(a);
}
if (len_b != 0) {
array_b = invlist_array(b);
}
/* If are to compare 'a' with the complement of b, set it
* up so are looking at b's complement. */
if (complement_b) {
/* The complement of nothing is everything, so <a> would have to have
* just one element, starting at zero (ending at infinity) */
if (len_b == 0) {
return (len_a == 1 && array_a[0] == 0);
}
if (array_b[0] == 0) {
/* Otherwise, to complement, we invert. Here, the first element is
* 0, just remove it. To do this, we just pretend the array starts
* one later */
array_b++;
len_b--;
}
else {
/* But if the first element is not zero, we pretend the list starts
* at the 0 that is always stored immediately before the array. */
array_b--;
len_b++;
}
}
return len_a == len_b
&& memEQ(array_a, array_b, len_a * sizeof(array_a[0]));
}
#endif
/*
* As best we can, determine the characters that can match the start of
* the given EXACTF-ish node. This is for use in creating ssc nodes, so there
* can be false positive matches
*
* Returns the invlist as a new SV*; it is the caller's responsibility to
* call SvREFCNT_dec() when done with it.
*/
STATIC SV*
S__make_exactf_invlist(pTHX_ RExC_state_t *pRExC_state, regnode *node)
{
dVAR;
const U8 * s = (U8*)STRING(node);
SSize_t bytelen = STR_LEN(node);
UV uc;
/* Start out big enough for 2 separate code points */
SV* invlist = _new_invlist(4);
PERL_ARGS_ASSERT__MAKE_EXACTF_INVLIST;
if (! UTF) {
uc = *s;
/* We punt and assume can match anything if the node begins
* with a multi-character fold. Things are complicated. For
* example, /ffi/i could match any of:
* "\N{LATIN SMALL LIGATURE FFI}"
* "\N{LATIN SMALL LIGATURE FF}I"
* "F\N{LATIN SMALL LIGATURE FI}"
* plus several other things; and making sure we have all the
* possibilities is hard. */
if (is_MULTI_CHAR_FOLD_latin1_safe(s, s + bytelen)) {
invlist = _add_range_to_invlist(invlist, 0, UV_MAX);
}
else {
/* Any Latin1 range character can potentially match any
* other depending on the locale, and in Turkic locales, U+130 and
* U+131 */
if (OP(node) == EXACTFL) {
_invlist_union(invlist, PL_Latin1, &invlist);
invlist = add_cp_to_invlist(invlist,
LATIN_SMALL_LETTER_DOTLESS_I);
invlist = add_cp_to_invlist(invlist,
LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE);
}
else {
/* But otherwise, it matches at least itself. We can
* quickly tell if it has a distinct fold, and if so,
* it matches that as well */
invlist = add_cp_to_invlist(invlist, uc);
if (IS_IN_SOME_FOLD_L1(uc))
invlist = add_cp_to_invlist(invlist, PL_fold_latin1[uc]);
}
/* Some characters match above-Latin1 ones under /i. This
* is true of EXACTFL ones when the locale is UTF-8 */
if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(uc)
&& (! isASCII(uc) || (OP(node) != EXACTFAA
&& OP(node) != EXACTFAA_NO_TRIE)))
{
add_above_Latin1_folds(pRExC_state, (U8) uc, &invlist);
}
}
}
else { /* Pattern is UTF-8 */
U8 folded[UTF8_MAX_FOLD_CHAR_EXPAND * UTF8_MAXBYTES_CASE + 1] = { '\0' };
const U8* e = s + bytelen;
IV fc;
fc = uc = utf8_to_uvchr_buf(s, s + bytelen, NULL);
/* The only code points that aren't folded in a UTF EXACTFish
* node are are the problematic ones in EXACTFL nodes */
if (OP(node) == EXACTFL && is_PROBLEMATIC_LOCALE_FOLDEDS_START_cp(uc)) {
/* We need to check for the possibility that this EXACTFL
* node begins with a multi-char fold. Therefore we fold
* the first few characters of it so that we can make that
* check */
U8 *d = folded;
int i;
fc = -1;
for (i = 0; i < UTF8_MAX_FOLD_CHAR_EXPAND && s < e; i++) {
if (isASCII(*s)) {
*(d++) = (U8) toFOLD(*s);
if (fc < 0) { /* Save the first fold */
fc = *(d-1);
}
s++;
}
else {
STRLEN len;
UV fold = toFOLD_utf8_safe(s, e, d, &len);
if (fc < 0) { /* Save the first fold */
fc = fold;
}
d += len;
s += UTF8SKIP(s);
}
}
/* And set up so the code below that looks in this folded
* buffer instead of the node's string */
e = d;
s = folded;
}
/* When we reach here 's' points to the fold of the first
* character(s) of the node; and 'e' points to far enough along
* the folded string to be just past any possible multi-char
* fold.
*
* Unlike the non-UTF-8 case, the macro for determining if a
* string is a multi-char fold requires all the characters to
* already be folded. This is because of all the complications
* if not. Note that they are folded anyway, except in EXACTFL
* nodes. Like the non-UTF case above, we punt if the node
* begins with a multi-char fold */
if (is_MULTI_CHAR_FOLD_utf8_safe(s, e)) {
invlist = _add_range_to_invlist(invlist, 0, UV_MAX);
}
else { /* Single char fold */
unsigned int k;
unsigned int first_fold;
const unsigned int * remaining_folds;
Size_t folds_count;
/* It matches itself */
invlist = add_cp_to_invlist(invlist, fc);
/* ... plus all the things that fold to it, which are found in
* PL_utf8_foldclosures */
folds_count = _inverse_folds(fc, &first_fold,
&remaining_folds);
for (k = 0; k < folds_count; k++) {
UV c = (k == 0) ? first_fold : remaining_folds[k-1];
/* /aa doesn't allow folds between ASCII and non- */
if ( (OP(node) == EXACTFAA || OP(node) == EXACTFAA_NO_TRIE)
&& isASCII(c) != isASCII(fc))
{
continue;
}
invlist = add_cp_to_invlist(invlist, c);
}
if (OP(node) == EXACTFL) {
/* If either [iI] are present in an EXACTFL node the above code
* should have added its normal case pair, but under a Turkish
* locale they could match instead the case pairs from it. Add
* those as potential matches as well */
if (isALPHA_FOLD_EQ(fc, 'I')) {
invlist = add_cp_to_invlist(invlist,
LATIN_SMALL_LETTER_DOTLESS_I);
invlist = add_cp_to_invlist(invlist,
LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE);
}
else if (fc == LATIN_SMALL_LETTER_DOTLESS_I) {
invlist = add_cp_to_invlist(invlist, 'I');
}
else if (fc == LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE) {
invlist = add_cp_to_invlist(invlist, 'i');
}
}
}
}
return invlist;
}
#undef HEADER_LENGTH
#undef TO_INTERNAL_SIZE
#undef FROM_INTERNAL_SIZE
#undef INVLIST_VERSION_ID
/* End of inversion list object */
STATIC void
S_parse_lparen_question_flags(pTHX_ RExC_state_t *pRExC_state)
{
/* This parses the flags that are in either the '(?foo)' or '(?foo:bar)'
* constructs, and updates RExC_flags with them. On input, RExC_parse
* should point to the first flag; it is updated on output to point to the
* final ')' or ':'. There needs to be at least one flag, or this will
* abort */
/* for (?g), (?gc), and (?o) warnings; warning
about (?c) will warn about (?g) -- japhy */
#define WASTED_O 0x01
#define WASTED_G 0x02
#define WASTED_C 0x04
#define WASTED_GC (WASTED_G|WASTED_C)
I32 wastedflags = 0x00;
U32 posflags = 0, negflags = 0;
U32 *flagsp = &posflags;
char has_charset_modifier = '\0';
regex_charset cs;
bool has_use_defaults = FALSE;
const char* const seqstart = RExC_parse - 1; /* Point to the '?' */
int x_mod_count = 0;
PERL_ARGS_ASSERT_PARSE_LPAREN_QUESTION_FLAGS;
/* '^' as an initial flag sets certain defaults */
if (UCHARAT(RExC_parse) == '^') {
RExC_parse++;
has_use_defaults = TRUE;
STD_PMMOD_FLAGS_CLEAR(&RExC_flags);
cs = (RExC_uni_semantics)
? REGEX_UNICODE_CHARSET
: REGEX_DEPENDS_CHARSET;
set_regex_charset(&RExC_flags, cs);
}
else {
cs = get_regex_charset(RExC_flags);
if ( cs == REGEX_DEPENDS_CHARSET
&& RExC_uni_semantics)
{
cs = REGEX_UNICODE_CHARSET;
}
}
while (RExC_parse < RExC_end) {
/* && strchr("iogcmsx", *RExC_parse) */
/* (?g), (?gc) and (?o) are useless here
and must be globally applied -- japhy */
switch (*RExC_parse) {
/* Code for the imsxn flags */
CASE_STD_PMMOD_FLAGS_PARSE_SET(flagsp, x_mod_count);
case LOCALE_PAT_MOD:
if (has_charset_modifier) {
goto excess_modifier;
}
else if (flagsp == &negflags) {
goto neg_modifier;
}
cs = REGEX_LOCALE_CHARSET;
has_charset_modifier = LOCALE_PAT_MOD;
break;
case UNICODE_PAT_MOD:
if (has_charset_modifier) {
goto excess_modifier;
}
else if (flagsp == &negflags) {
goto neg_modifier;
}
cs = REGEX_UNICODE_CHARSET;
has_charset_modifier = UNICODE_PAT_MOD;
break;
case ASCII_RESTRICT_PAT_MOD:
if (flagsp == &negflags) {
goto neg_modifier;
}
if (has_charset_modifier) {
if (cs != REGEX_ASCII_RESTRICTED_CHARSET) {
goto excess_modifier;
}
/* Doubled modifier implies more restricted */
cs = REGEX_ASCII_MORE_RESTRICTED_CHARSET;
}
else {
cs = REGEX_ASCII_RESTRICTED_CHARSET;
}
has_charset_modifier = ASCII_RESTRICT_PAT_MOD;
break;
case DEPENDS_PAT_MOD:
if (has_use_defaults) {
goto fail_modifiers;
}
else if (flagsp == &negflags) {
goto neg_modifier;
}
else if (has_charset_modifier) {
goto excess_modifier;
}
/* The dual charset means unicode semantics if the
* pattern (or target, not known until runtime) are
* utf8, or something in the pattern indicates unicode
* semantics */
cs = (RExC_uni_semantics)
? REGEX_UNICODE_CHARSET
: REGEX_DEPENDS_CHARSET;
has_charset_modifier = DEPENDS_PAT_MOD;
break;
excess_modifier:
RExC_parse++;
if (has_charset_modifier == ASCII_RESTRICT_PAT_MOD) {
vFAIL2("Regexp modifier \"%c\" may appear a maximum of twice", ASCII_RESTRICT_PAT_MOD);
}
else if (has_charset_modifier == *(RExC_parse - 1)) {
vFAIL2("Regexp modifier \"%c\" may not appear twice",
*(RExC_parse - 1));
}
else {
vFAIL3("Regexp modifiers \"%c\" and \"%c\" are mutually exclusive", has_charset_modifier, *(RExC_parse - 1));
}
NOT_REACHED; /*NOTREACHED*/
neg_modifier:
RExC_parse++;
vFAIL2("Regexp modifier \"%c\" may not appear after the \"-\"",
*(RExC_parse - 1));
NOT_REACHED; /*NOTREACHED*/
case ONCE_PAT_MOD: /* 'o' */
case GLOBAL_PAT_MOD: /* 'g' */
if (ckWARN(WARN_REGEXP)) {
const I32 wflagbit = *RExC_parse == 'o'
? WASTED_O
: WASTED_G;
if (! (wastedflags & wflagbit) ) {
wastedflags |= wflagbit;
/* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */
vWARN5(
RExC_parse + 1,
"Useless (%s%c) - %suse /%c modifier",
flagsp == &negflags ? "?-" : "?",
*RExC_parse,
flagsp == &negflags ? "don't " : "",
*RExC_parse
);
}
}
break;
case CONTINUE_PAT_MOD: /* 'c' */
if (ckWARN(WARN_REGEXP)) {
if (! (wastedflags & WASTED_C) ) {
wastedflags |= WASTED_GC;
/* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */
vWARN3(
RExC_parse + 1,
"Useless (%sc) - %suse /gc modifier",
flagsp == &negflags ? "?-" : "?",
flagsp == &negflags ? "don't " : ""
);
}
}
break;
case KEEPCOPY_PAT_MOD: /* 'p' */
if (flagsp == &negflags) {
ckWARNreg(RExC_parse + 1,"Useless use of (?-p)");
} else {
*flagsp |= RXf_PMf_KEEPCOPY;
}
break;
case '-':
/* A flag is a default iff it is following a minus, so
* if there is a minus, it means will be trying to
* re-specify a default which is an error */
if (has_use_defaults || flagsp == &negflags) {
goto fail_modifiers;
}
flagsp = &negflags;
wastedflags = 0; /* reset so (?g-c) warns twice */
x_mod_count = 0;
break;
case ':':
case ')':
if ((posflags & (RXf_PMf_EXTENDED|RXf_PMf_EXTENDED_MORE)) == RXf_PMf_EXTENDED) {
negflags |= RXf_PMf_EXTENDED_MORE;
}
RExC_flags |= posflags;
if (negflags & RXf_PMf_EXTENDED) {
negflags |= RXf_PMf_EXTENDED_MORE;
}
RExC_flags &= ~negflags;
set_regex_charset(&RExC_flags, cs);
return;
default:
fail_modifiers:
RExC_parse += SKIP_IF_CHAR(RExC_parse, RExC_end);
/* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */
vFAIL2utf8f("Sequence (%" UTF8f "...) not recognized",
UTF8fARG(UTF, RExC_parse-seqstart, seqstart));
NOT_REACHED; /*NOTREACHED*/
}
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
}
vFAIL("Sequence (?... not terminated");
}
/*
- reg - regular expression, i.e. main body or parenthesized thing
*
* Caller must absorb opening parenthesis.
*
* Combining parenthesis handling with the base level of regular expression
* is a trifle forced, but the need to tie the tails of the branches to what
* follows makes it hard to avoid.
*/
#define REGTAIL(x,y,z) regtail((x),(y),(z),depth+1)
#ifdef DEBUGGING
#define REGTAIL_STUDY(x,y,z) regtail_study((x),(y),(z),depth+1)
#else
#define REGTAIL_STUDY(x,y,z) regtail((x),(y),(z),depth+1)
#endif
PERL_STATIC_INLINE regnode_offset
S_handle_named_backref(pTHX_ RExC_state_t *pRExC_state,
I32 *flagp,
char * parse_start,
char ch
)
{
regnode_offset ret;
char* name_start = RExC_parse;
U32 num = 0;
SV *sv_dat = reg_scan_name(pRExC_state, REG_RSN_RETURN_DATA);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_HANDLE_NAMED_BACKREF;
if (RExC_parse == name_start || *RExC_parse != ch) {
/* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */
vFAIL2("Sequence %.3s... not terminated", parse_start);
}
if (sv_dat) {
num = add_data( pRExC_state, STR_WITH_LEN("S"));
RExC_rxi->data->data[num]=(void*)sv_dat;
SvREFCNT_inc_simple_void_NN(sv_dat);
}
RExC_sawback = 1;
ret = reganode(pRExC_state,
((! FOLD)
? NREF
: (ASCII_FOLD_RESTRICTED)
? NREFFA
: (AT_LEAST_UNI_SEMANTICS)
? NREFFU
: (LOC)
? NREFFL
: NREFF),
num);
*flagp |= HASWIDTH;
Set_Node_Offset(REGNODE_p(ret), parse_start+1);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
nextchar(pRExC_state);
return ret;
}
/* On success, returns the offset at which any next node should be placed into
* the regex engine program being compiled.
*
* Returns 0 otherwise, with *flagp set to indicate why:
* TRYAGAIN at the end of (?) that only sets flags.
* RESTART_PARSE if the parse needs to be restarted, or'd with
* NEED_UTF8 if the pattern needs to be upgraded to UTF-8.
* Otherwise would only return 0 if regbranch() returns 0, which cannot
* happen. */
STATIC regnode_offset
S_reg(pTHX_ RExC_state_t *pRExC_state, I32 paren, I32 *flagp, U32 depth)
/* paren: Parenthesized? 0=top; 1,2=inside '(': changed to letter.
* 2 is like 1, but indicates that nextchar() has been called to advance
* RExC_parse beyond the '('. Things like '(?' are indivisible tokens, and
* this flag alerts us to the need to check for that */
{
regnode_offset ret = 0; /* Will be the head of the group. */
regnode_offset br;
regnode_offset lastbr;
regnode_offset ender = 0;
I32 parno = 0;
I32 flags;
U32 oregflags = RExC_flags;
bool have_branch = 0;
bool is_open = 0;
I32 freeze_paren = 0;
I32 after_freeze = 0;
I32 num; /* numeric backreferences */
SV * max_open; /* Max number of unclosed parens */
char * parse_start = RExC_parse; /* MJD */
char * const oregcomp_parse = RExC_parse;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REG;
DEBUG_PARSE("reg ");
max_open = get_sv(RE_COMPILE_RECURSION_LIMIT, GV_ADD);
assert(max_open);
if (!SvIOK(max_open)) {
sv_setiv(max_open, RE_COMPILE_RECURSION_INIT);
}
if (depth > 4 * (UV) SvIV(max_open)) { /* We increase depth by 4 for each
open paren */
vFAIL("Too many nested open parens");
}
*flagp = 0; /* Tentatively. */
/* Having this true makes it feasible to have a lot fewer tests for the
* parse pointer being in scope. For example, we can write
* while(isFOO(*RExC_parse)) RExC_parse++;
* instead of
* while(RExC_parse < RExC_end && isFOO(*RExC_parse)) RExC_parse++;
*/
assert(*RExC_end == '\0');
/* Make an OPEN node, if parenthesized. */
if (paren) {
/* Under /x, space and comments can be gobbled up between the '(' and
* here (if paren ==2). The forms '(*VERB' and '(?...' disallow such
* intervening space, as the sequence is a token, and a token should be
* indivisible */
bool has_intervening_patws = (paren == 2)
&& *(RExC_parse - 1) != '(';
if (RExC_parse >= RExC_end) {
vFAIL("Unmatched (");
}
if (paren == 'r') { /* Atomic script run */
paren = '>';
goto parse_rest;
}
else if ( *RExC_parse == '*') { /* (*VERB:ARG), (*construct:...) */
char *start_verb = RExC_parse + 1;
STRLEN verb_len;
char *start_arg = NULL;
unsigned char op = 0;
int arg_required = 0;
int internal_argval = -1; /* if >-1 we are not allowed an argument*/
bool has_upper = FALSE;
if (has_intervening_patws) {
RExC_parse++; /* past the '*' */
/* For strict backwards compatibility, don't change the message
* now that we also have lowercase operands */
if (isUPPER(*RExC_parse)) {
vFAIL("In '(*VERB...)', the '(' and '*' must be adjacent");
}
else {
vFAIL("In '(*...)', the '(' and '*' must be adjacent");
}
}
while (RExC_parse < RExC_end && *RExC_parse != ')' ) {
if ( *RExC_parse == ':' ) {
start_arg = RExC_parse + 1;
break;
}
else if (! UTF) {
if (isUPPER(*RExC_parse)) {
has_upper = TRUE;
}
RExC_parse++;
}
else {
RExC_parse += UTF8SKIP(RExC_parse);
}
}
verb_len = RExC_parse - start_verb;
if ( start_arg ) {
if (RExC_parse >= RExC_end) {
goto unterminated_verb_pattern;
}
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
while ( RExC_parse < RExC_end && *RExC_parse != ')' ) {
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
}
if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) {
unterminated_verb_pattern:
if (has_upper) {
vFAIL("Unterminated verb pattern argument");
}
else {
vFAIL("Unterminated '(*...' argument");
}
}
} else {
if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) {
if (has_upper) {
vFAIL("Unterminated verb pattern");
}
else {
vFAIL("Unterminated '(*...' construct");
}
}
}
/* Here, we know that RExC_parse < RExC_end */
switch ( *start_verb ) {
case 'A': /* (*ACCEPT) */
if ( memEQs(start_verb, verb_len,"ACCEPT") ) {
op = ACCEPT;
internal_argval = RExC_nestroot;
}
break;
case 'C': /* (*COMMIT) */
if ( memEQs(start_verb, verb_len,"COMMIT") )
op = COMMIT;
break;
case 'F': /* (*FAIL) */
if ( verb_len==1 || memEQs(start_verb, verb_len,"FAIL") ) {
op = OPFAIL;
}
break;
case ':': /* (*:NAME) */
case 'M': /* (*MARK:NAME) */
if ( verb_len==0 || memEQs(start_verb, verb_len,"MARK") ) {
op = MARKPOINT;
arg_required = 1;
}
break;
case 'P': /* (*PRUNE) */
if ( memEQs(start_verb, verb_len,"PRUNE") )
op = PRUNE;
break;
case 'S': /* (*SKIP) */
if ( memEQs(start_verb, verb_len,"SKIP") )
op = SKIP;
break;
case 'T': /* (*THEN) */
/* [19:06] <TimToady> :: is then */
if ( memEQs(start_verb, verb_len,"THEN") ) {
op = CUTGROUP;
RExC_seen |= REG_CUTGROUP_SEEN;
}
break;
case 'a':
if ( memEQs(start_verb, verb_len, "asr")
|| memEQs(start_verb, verb_len, "atomic_script_run"))
{
paren = 'r'; /* Mnemonic: recursed run */
goto script_run;
}
else if (memEQs(start_verb, verb_len, "atomic")) {
paren = 't'; /* AtOMIC */
goto alpha_assertions;
}
break;
case 'p':
if ( memEQs(start_verb, verb_len, "plb")
|| memEQs(start_verb, verb_len, "positive_lookbehind"))
{
paren = 'b';
goto lookbehind_alpha_assertions;
}
else if ( memEQs(start_verb, verb_len, "pla")
|| memEQs(start_verb, verb_len, "positive_lookahead"))
{
paren = 'a';
goto alpha_assertions;
}
break;
case 'n':
if ( memEQs(start_verb, verb_len, "nlb")
|| memEQs(start_verb, verb_len, "negative_lookbehind"))
{
paren = 'B';
goto lookbehind_alpha_assertions;
}
else if ( memEQs(start_verb, verb_len, "nla")
|| memEQs(start_verb, verb_len, "negative_lookahead"))
{
paren = 'A';
goto alpha_assertions;
}
break;
case 's':
if ( memEQs(start_verb, verb_len, "sr")
|| memEQs(start_verb, verb_len, "script_run"))
{
regnode_offset atomic;
paren = 's';
script_run:
/* This indicates Unicode rules. */
REQUIRE_UNI_RULES(flagp, 0);
if (! start_arg) {
goto no_colon;
}
RExC_parse = start_arg;
if (RExC_in_script_run) {
/* Nested script runs are treated as no-ops, because
* if the nested one fails, the outer one must as
* well. It could fail sooner, and avoid (??{} with
* side effects, but that is explicitly documented as
* undefined behavior. */
ret = 0;
if (paren == 's') {
paren = ':';
goto parse_rest;
}
/* But, the atomic part of a nested atomic script run
* isn't a no-op, but can be treated just like a '(?>'
* */
paren = '>';
goto parse_rest;
}
/* By doing this here, we avoid extra warnings for nested
* script runs */
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__SCRIPT_RUN,
"The script_run feature is experimental");
if (paren == 's') {
/* Here, we're starting a new regular script run */
ret = reg_node(pRExC_state, SROPEN);
RExC_in_script_run = 1;
is_open = 1;
goto parse_rest;
}
/* Here, we are starting an atomic script run. This is
* handled by recursing to deal with the atomic portion
* separately, enclosed in SROPEN ... SRCLOSE nodes */
ret = reg_node(pRExC_state, SROPEN);
RExC_in_script_run = 1;
atomic = reg(pRExC_state, 'r', &flags, depth);
if (flags & (RESTART_PARSE|NEED_UTF8)) {
*flagp = flags & (RESTART_PARSE|NEED_UTF8);
return 0;
}
if (! REGTAIL(pRExC_state, ret, atomic)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (! REGTAIL(pRExC_state, atomic, reg_node(pRExC_state,
SRCLOSE)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
RExC_in_script_run = 0;
return ret;
}
break;
lookbehind_alpha_assertions:
RExC_seen |= REG_LOOKBEHIND_SEEN;
RExC_in_lookbehind++;
/*FALLTHROUGH*/
alpha_assertions:
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__ALPHA_ASSERTIONS,
"The alpha_assertions feature is experimental");
RExC_seen_zerolen++;
if (! start_arg) {
goto no_colon;
}
/* An empty negative lookahead assertion simply is failure */
if (paren == 'A' && RExC_parse == start_arg) {
ret=reganode(pRExC_state, OPFAIL, 0);
nextchar(pRExC_state);
return ret;
}
RExC_parse = start_arg;
goto parse_rest;
no_colon:
vFAIL2utf8f(
"'(*%" UTF8f "' requires a terminating ':'",
UTF8fARG(UTF, verb_len, start_verb));
NOT_REACHED; /*NOTREACHED*/
} /* End of switch */
if ( ! op ) {
RExC_parse += UTF
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
if (has_upper || verb_len == 0) {
vFAIL2utf8f(
"Unknown verb pattern '%" UTF8f "'",
UTF8fARG(UTF, verb_len, start_verb));
}
else {
vFAIL2utf8f(
"Unknown '(*...)' construct '%" UTF8f "'",
UTF8fARG(UTF, verb_len, start_verb));
}
}
if ( RExC_parse == start_arg ) {
start_arg = NULL;
}
if ( arg_required && !start_arg ) {
vFAIL3("Verb pattern '%.*s' has a mandatory argument",
verb_len, start_verb);
}
if (internal_argval == -1) {
ret = reganode(pRExC_state, op, 0);
} else {
ret = reg2Lanode(pRExC_state, op, 0, internal_argval);
}
RExC_seen |= REG_VERBARG_SEEN;
if (start_arg) {
SV *sv = newSVpvn( start_arg,
RExC_parse - start_arg);
ARG(REGNODE_p(ret)) = add_data( pRExC_state,
STR_WITH_LEN("S"));
RExC_rxi->data->data[ARG(REGNODE_p(ret))]=(void*)sv;
FLAGS(REGNODE_p(ret)) = 1;
} else {
FLAGS(REGNODE_p(ret)) = 0;
}
if ( internal_argval != -1 )
ARG2L_SET(REGNODE_p(ret), internal_argval);
nextchar(pRExC_state);
return ret;
}
else if (*RExC_parse == '?') { /* (?...) */
bool is_logical = 0;
const char * const seqstart = RExC_parse;
const char * endptr;
if (has_intervening_patws) {
RExC_parse++;
vFAIL("In '(?...)', the '(' and '?' must be adjacent");
}
RExC_parse++; /* past the '?' */
paren = *RExC_parse; /* might be a trailing NUL, if not
well-formed */
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
if (RExC_parse > RExC_end) {
paren = '\0';
}
ret = 0; /* For look-ahead/behind. */
switch (paren) {
case 'P': /* (?P...) variants for those used to PCRE/Python */
paren = *RExC_parse;
if ( paren == '<') { /* (?P<...>) named capture */
RExC_parse++;
if (RExC_parse >= RExC_end) {
vFAIL("Sequence (?P<... not terminated");
}
goto named_capture;
}
else if (paren == '>') { /* (?P>name) named recursion */
RExC_parse++;
if (RExC_parse >= RExC_end) {
vFAIL("Sequence (?P>... not terminated");
}
goto named_recursion;
}
else if (paren == '=') { /* (?P=...) named backref */
RExC_parse++;
return handle_named_backref(pRExC_state, flagp,
parse_start, ')');
}
RExC_parse += SKIP_IF_CHAR(RExC_parse, RExC_end);
/* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */
vFAIL3("Sequence (%.*s...) not recognized",
RExC_parse-seqstart, seqstart);
NOT_REACHED; /*NOTREACHED*/
case '<': /* (?<...) */
if (*RExC_parse == '!')
paren = ',';
else if (*RExC_parse != '=')
named_capture:
{ /* (?<...>) */
char *name_start;
SV *svname;
paren= '>';
/* FALLTHROUGH */
case '\'': /* (?'...') */
name_start = RExC_parse;
svname = reg_scan_name(pRExC_state, REG_RSN_RETURN_NAME);
if ( RExC_parse == name_start
|| RExC_parse >= RExC_end
|| *RExC_parse != paren)
{
vFAIL2("Sequence (?%c... not terminated",
paren=='>' ? '<' : paren);
}
{
HE *he_str;
SV *sv_dat = NULL;
if (!svname) /* shouldn't happen */
Perl_croak(aTHX_
"panic: reg_scan_name returned NULL");
if (!RExC_paren_names) {
RExC_paren_names= newHV();
sv_2mortal(MUTABLE_SV(RExC_paren_names));
#ifdef DEBUGGING
RExC_paren_name_list= newAV();
sv_2mortal(MUTABLE_SV(RExC_paren_name_list));
#endif
}
he_str = hv_fetch_ent( RExC_paren_names, svname, 1, 0 );
if ( he_str )
sv_dat = HeVAL(he_str);
if ( ! sv_dat ) {
/* croak baby croak */
Perl_croak(aTHX_
"panic: paren_name hash element allocation failed");
} else if ( SvPOK(sv_dat) ) {
/* (?|...) can mean we have dupes so scan to check
its already been stored. Maybe a flag indicating
we are inside such a construct would be useful,
but the arrays are likely to be quite small, so
for now we punt -- dmq */
IV count = SvIV(sv_dat);
I32 *pv = (I32*)SvPVX(sv_dat);
IV i;
for ( i = 0 ; i < count ; i++ ) {
if ( pv[i] == RExC_npar ) {
count = 0;
break;
}
}
if ( count ) {
pv = (I32*)SvGROW(sv_dat,
SvCUR(sv_dat) + sizeof(I32)+1);
SvCUR_set(sv_dat, SvCUR(sv_dat) + sizeof(I32));
pv[count] = RExC_npar;
SvIV_set(sv_dat, SvIVX(sv_dat) + 1);
}
} else {
(void)SvUPGRADE(sv_dat, SVt_PVNV);
sv_setpvn(sv_dat, (char *)&(RExC_npar),
sizeof(I32));
SvIOK_on(sv_dat);
SvIV_set(sv_dat, 1);
}
#ifdef DEBUGGING
/* Yes this does cause a memory leak in debugging Perls
* */
if (!av_store(RExC_paren_name_list,
RExC_npar, SvREFCNT_inc_NN(svname)))
SvREFCNT_dec_NN(svname);
#endif
/*sv_dump(sv_dat);*/
}
nextchar(pRExC_state);
paren = 1;
goto capturing_parens;
}
RExC_seen |= REG_LOOKBEHIND_SEEN;
RExC_in_lookbehind++;
RExC_parse++;
if (RExC_parse >= RExC_end) {
vFAIL("Sequence (?... not terminated");
}
/* FALLTHROUGH */
case '=': /* (?=...) */
RExC_seen_zerolen++;
break;
case '!': /* (?!...) */
RExC_seen_zerolen++;
/* check if we're really just a "FAIL" assertion */
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
if (*RExC_parse == ')') {
ret=reganode(pRExC_state, OPFAIL, 0);
nextchar(pRExC_state);
return ret;
}
break;
case '|': /* (?|...) */
/* branch reset, behave like a (?:...) except that
buffers in alternations share the same numbers */
paren = ':';
after_freeze = freeze_paren = RExC_npar;
/* XXX This construct currently requires an extra pass.
* Investigation would be required to see if that could be
* changed */
REQUIRE_PARENS_PASS;
break;
case ':': /* (?:...) */
case '>': /* (?>...) */
break;
case '$': /* (?$...) */
case '@': /* (?@...) */
vFAIL2("Sequence (?%c...) not implemented", (int)paren);
break;
case '0' : /* (?0) */
case 'R' : /* (?R) */
if (RExC_parse == RExC_end || *RExC_parse != ')')
FAIL("Sequence (?R) not terminated");
num = 0;
RExC_seen |= REG_RECURSE_SEEN;
/* XXX These constructs currently require an extra pass.
* It probably could be changed */
REQUIRE_PARENS_PASS;
*flagp |= POSTPONED;
goto gen_recurse_regop;
/*notreached*/
/* named and numeric backreferences */
case '&': /* (?&NAME) */
parse_start = RExC_parse - 1;
named_recursion:
{
SV *sv_dat = reg_scan_name(pRExC_state,
REG_RSN_RETURN_DATA);
num = sv_dat ? *((I32 *)SvPVX(sv_dat)) : 0;
}
if (RExC_parse >= RExC_end || *RExC_parse != ')')
vFAIL("Sequence (?&... not terminated");
goto gen_recurse_regop;
/* NOTREACHED */
case '+':
if (! inRANGE(RExC_parse[0], '1', '9')) {
RExC_parse++;
vFAIL("Illegal pattern");
}
goto parse_recursion;
/* NOTREACHED*/
case '-': /* (?-1) */
if (! inRANGE(RExC_parse[0], '1', '9')) {
RExC_parse--; /* rewind to let it be handled later */
goto parse_flags;
}
/* FALLTHROUGH */
case '1': case '2': case '3': case '4': /* (?1) */
case '5': case '6': case '7': case '8': case '9':
RExC_parse = (char *) seqstart + 1; /* Point to the digit */
parse_recursion:
{
bool is_neg = FALSE;
UV unum;
parse_start = RExC_parse - 1; /* MJD */
if (*RExC_parse == '-') {
RExC_parse++;
is_neg = TRUE;
}
endptr = RExC_end;
if (grok_atoUV(RExC_parse, &unum, &endptr)
&& unum <= I32_MAX
) {
num = (I32)unum;
RExC_parse = (char*)endptr;
} else
num = I32_MAX;
if (is_neg) {
/* Some limit for num? */
num = -num;
}
}
if (*RExC_parse!=')')
vFAIL("Expecting close bracket");
gen_recurse_regop:
if ( paren == '-' ) {
/*
Diagram of capture buffer numbering.
Top line is the normal capture buffer numbers
Bottom line is the negative indexing as from
the X (the (?-2))
+ 1 2 3 4 5 X 6 7
/(a(x)y)(a(b(c(?-2)d)e)f)(g(h))/
- 5 4 3 2 1 X x x
*/
num = RExC_npar + num;
if (num < 1) {
/* It might be a forward reference; we can't fail until
* we know, by completing the parse to get all the
* groups, and then reparsing */
if (ALL_PARENS_COUNTED) {
RExC_parse++;
vFAIL("Reference to nonexistent group");
}
else {
REQUIRE_PARENS_PASS;
}
}
} else if ( paren == '+' ) {
num = RExC_npar + num - 1;
}
/* We keep track how many GOSUB items we have produced.
To start off the ARG2L() of the GOSUB holds its "id",
which is used later in conjunction with RExC_recurse
to calculate the offset we need to jump for the GOSUB,
which it will store in the final representation.
We have to defer the actual calculation until much later
as the regop may move.
*/
ret = reg2Lanode(pRExC_state, GOSUB, num, RExC_recurse_count);
if (num >= RExC_npar) {
/* It might be a forward reference; we can't fail until we
* know, by completing the parse to get all the groups, and
* then reparsing */
if (ALL_PARENS_COUNTED) {
if (num >= RExC_total_parens) {
RExC_parse++;
vFAIL("Reference to nonexistent group");
}
}
else {
REQUIRE_PARENS_PASS;
}
}
RExC_recurse_count++;
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Recurse #%" UVuf " to %" IVdf "\n",
22, "| |", (int)(depth * 2 + 1), "",
(UV)ARG(REGNODE_p(ret)),
(IV)ARG2L(REGNODE_p(ret))));
RExC_seen |= REG_RECURSE_SEEN;
Set_Node_Length(REGNODE_p(ret),
1 + regarglen[OP(REGNODE_p(ret))]); /* MJD */
Set_Node_Offset(REGNODE_p(ret), parse_start); /* MJD */
*flagp |= POSTPONED;
assert(*RExC_parse == ')');
nextchar(pRExC_state);
return ret;
/* NOTREACHED */
case '?': /* (??...) */
is_logical = 1;
if (*RExC_parse != '{') {
RExC_parse += SKIP_IF_CHAR(RExC_parse, RExC_end);
/* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */
vFAIL2utf8f(
"Sequence (%" UTF8f "...) not recognized",
UTF8fARG(UTF, RExC_parse-seqstart, seqstart));
NOT_REACHED; /*NOTREACHED*/
}
*flagp |= POSTPONED;
paren = '{';
RExC_parse++;
/* FALLTHROUGH */
case '{': /* (?{...}) */
{
U32 n = 0;
struct reg_code_block *cb;
OP * o;
RExC_seen_zerolen++;
if ( !pRExC_state->code_blocks
|| pRExC_state->code_index
>= pRExC_state->code_blocks->count
|| pRExC_state->code_blocks->cb[pRExC_state->code_index].start
!= (STRLEN)((RExC_parse -3 - (is_logical ? 1 : 0))
- RExC_start)
) {
if (RExC_pm_flags & PMf_USE_RE_EVAL)
FAIL("panic: Sequence (?{...}): no code block found\n");
FAIL("Eval-group not allowed at runtime, use re 'eval'");
}
/* this is a pre-compiled code block (?{...}) */
cb = &pRExC_state->code_blocks->cb[pRExC_state->code_index];
RExC_parse = RExC_start + cb->end;
o = cb->block;
if (cb->src_regex) {
n = add_data(pRExC_state, STR_WITH_LEN("rl"));
RExC_rxi->data->data[n] =
(void*)SvREFCNT_inc((SV*)cb->src_regex);
RExC_rxi->data->data[n+1] = (void*)o;
}
else {
n = add_data(pRExC_state,
(RExC_pm_flags & PMf_HAS_CV) ? "L" : "l", 1);
RExC_rxi->data->data[n] = (void*)o;
}
pRExC_state->code_index++;
nextchar(pRExC_state);
if (is_logical) {
regnode_offset eval;
ret = reg_node(pRExC_state, LOGICAL);
eval = reg2Lanode(pRExC_state, EVAL,
n,
/* for later propagation into (??{})
* return value */
RExC_flags & RXf_PMf_COMPILETIME
);
FLAGS(REGNODE_p(ret)) = 2;
if (! REGTAIL(pRExC_state, ret, eval)) {
REQUIRE_BRANCHJ(flagp, 0);
}
/* deal with the length of this later - MJD */
return ret;
}
ret = reg2Lanode(pRExC_state, EVAL, n, 0);
Set_Node_Length(REGNODE_p(ret), RExC_parse - parse_start + 1);
Set_Node_Offset(REGNODE_p(ret), parse_start);
return ret;
}
case '(': /* (?(?{...})...) and (?(?=...)...) */
{
int is_define= 0;
const int DEFINE_len = sizeof("DEFINE") - 1;
if ( RExC_parse < RExC_end - 1
&& ( ( RExC_parse[0] == '?' /* (?(?...)) */
&& ( RExC_parse[1] == '='
|| RExC_parse[1] == '!'
|| RExC_parse[1] == '<'
|| RExC_parse[1] == '{'))
|| ( RExC_parse[0] == '*' /* (?(*...)) */
&& ( memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"pla:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"plb:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"nla:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"nlb:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"positive_lookahead:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"positive_lookbehind:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"negative_lookahead:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"negative_lookbehind:"))))
) { /* Lookahead or eval. */
I32 flag;
regnode_offset tail;
ret = reg_node(pRExC_state, LOGICAL);
FLAGS(REGNODE_p(ret)) = 1;
tail = reg(pRExC_state, 1, &flag, depth+1);
RETURN_FAIL_ON_RESTART(flag, flagp);
if (! REGTAIL(pRExC_state, ret, tail)) {
REQUIRE_BRANCHJ(flagp, 0);
}
goto insert_if;
}
else if ( RExC_parse[0] == '<' /* (?(<NAME>)...) */
|| RExC_parse[0] == '\'' ) /* (?('NAME')...) */
{
char ch = RExC_parse[0] == '<' ? '>' : '\'';
char *name_start= RExC_parse++;
U32 num = 0;
SV *sv_dat=reg_scan_name(pRExC_state, REG_RSN_RETURN_DATA);
if ( RExC_parse == name_start
|| RExC_parse >= RExC_end
|| *RExC_parse != ch)
{
vFAIL2("Sequence (?(%c... not terminated",
(ch == '>' ? '<' : ch));
}
RExC_parse++;
if (sv_dat) {
num = add_data( pRExC_state, STR_WITH_LEN("S"));
RExC_rxi->data->data[num]=(void*)sv_dat;
SvREFCNT_inc_simple_void_NN(sv_dat);
}
ret = reganode(pRExC_state, NGROUPP, num);
goto insert_if_check_paren;
}
else if (memBEGINs(RExC_parse,
(STRLEN) (RExC_end - RExC_parse),
"DEFINE"))
{
ret = reganode(pRExC_state, DEFINEP, 0);
RExC_parse += DEFINE_len;
is_define = 1;
goto insert_if_check_paren;
}
else if (RExC_parse[0] == 'R') {
RExC_parse++;
/* parno == 0 => /(?(R)YES|NO)/ "in any form of recursion OR eval"
* parno == 1 => /(?(R0)YES|NO)/ "in GOSUB (?0) / (?R)"
* parno == 2 => /(?(R1)YES|NO)/ "in GOSUB (?1) (parno-1)"
*/
parno = 0;
if (RExC_parse[0] == '0') {
parno = 1;
RExC_parse++;
}
else if (inRANGE(RExC_parse[0], '1', '9')) {
UV uv;
endptr = RExC_end;
if (grok_atoUV(RExC_parse, &uv, &endptr)
&& uv <= I32_MAX
) {
parno = (I32)uv + 1;
RExC_parse = (char*)endptr;
}
/* else "Switch condition not recognized" below */
} else if (RExC_parse[0] == '&') {
SV *sv_dat;
RExC_parse++;
sv_dat = reg_scan_name(pRExC_state,
REG_RSN_RETURN_DATA);
if (sv_dat)
parno = 1 + *((I32 *)SvPVX(sv_dat));
}
ret = reganode(pRExC_state, INSUBP, parno);
goto insert_if_check_paren;
}
else if (inRANGE(RExC_parse[0], '1', '9')) {
/* (?(1)...) */
char c;
UV uv;
endptr = RExC_end;
if (grok_atoUV(RExC_parse, &uv, &endptr)
&& uv <= I32_MAX
) {
parno = (I32)uv;
RExC_parse = (char*)endptr;
}
else {
vFAIL("panic: grok_atoUV returned FALSE");
}
ret = reganode(pRExC_state, GROUPP, parno);
insert_if_check_paren:
if (UCHARAT(RExC_parse) != ')') {
RExC_parse += UTF
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL("Switch condition not recognized");
}
nextchar(pRExC_state);
insert_if:
if (! REGTAIL(pRExC_state, ret, reganode(pRExC_state,
IFTHEN, 0)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
br = regbranch(pRExC_state, &flags, 1, depth+1);
if (br == 0) {
RETURN_FAIL_ON_RESTART(flags,flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf,
(UV) flags);
} else
if (! REGTAIL(pRExC_state, br, reganode(pRExC_state,
LONGJMP, 0)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
c = UCHARAT(RExC_parse);
nextchar(pRExC_state);
if (flags&HASWIDTH)
*flagp |= HASWIDTH;
if (c == '|') {
if (is_define)
vFAIL("(?(DEFINE)....) does not allow branches");
/* Fake one for optimizer. */
lastbr = reganode(pRExC_state, IFTHEN, 0);
if (!regbranch(pRExC_state, &flags, 1, depth+1)) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf,
(UV) flags);
}
if (! REGTAIL(pRExC_state, ret, lastbr)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (flags&HASWIDTH)
*flagp |= HASWIDTH;
c = UCHARAT(RExC_parse);
nextchar(pRExC_state);
}
else
lastbr = 0;
if (c != ')') {
if (RExC_parse >= RExC_end)
vFAIL("Switch (?(condition)... not terminated");
else
vFAIL("Switch (?(condition)... contains too many branches");
}
ender = reg_node(pRExC_state, TAIL);
if (! REGTAIL(pRExC_state, br, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (lastbr) {
if (! REGTAIL(pRExC_state, lastbr, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (! REGTAIL(pRExC_state,
REGNODE_OFFSET(
NEXTOPER(
NEXTOPER(REGNODE_p(lastbr)))),
ender))
{
REQUIRE_BRANCHJ(flagp, 0);
}
}
else
if (! REGTAIL(pRExC_state, ret, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
#if 0 /* Removing this doesn't cause failures in the test suite -- khw */
RExC_size++; /* XXX WHY do we need this?!!
For large programs it seems to be required
but I can't figure out why. -- dmq*/
#endif
return ret;
}
RExC_parse += UTF
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL("Unknown switch condition (?(...))");
}
case '[': /* (?[ ... ]) */
return handle_regex_sets(pRExC_state, NULL, flagp, depth+1,
oregcomp_parse);
case 0: /* A NUL */
RExC_parse--; /* for vFAIL to print correctly */
vFAIL("Sequence (? incomplete");
break;
case ')':
if (RExC_strict) { /* [perl #132851] */
ckWARNreg(RExC_parse, "Empty (?) without any modifiers");
}
/* FALLTHROUGH */
default: /* e.g., (?i) */
RExC_parse = (char *) seqstart + 1;
parse_flags:
parse_lparen_question_flags(pRExC_state);
if (UCHARAT(RExC_parse) != ':') {
if (RExC_parse < RExC_end)
nextchar(pRExC_state);
*flagp = TRYAGAIN;
return 0;
}
paren = ':';
nextchar(pRExC_state);
ret = 0;
goto parse_rest;
} /* end switch */
}
else {
if (*RExC_parse == '{') {
ckWARNregdep(RExC_parse + 1,
"Unescaped left brace in regex is "
"deprecated here (and will be fatal "
"in Perl 5.32), passed through");
}
/* Not bothering to indent here, as the above 'else' is temporary
* */
if (!(RExC_flags & RXf_PMf_NOCAPTURE)) { /* (...) */
capturing_parens:
parno = RExC_npar;
RExC_npar++;
if (! ALL_PARENS_COUNTED) {
/* If we are in our first pass through (and maybe only pass),
* we need to allocate memory for the capturing parentheses
* data structures.
*/
if (!RExC_parens_buf_size) {
/* first guess at number of parens we might encounter */
RExC_parens_buf_size = 10;
/* setup RExC_open_parens, which holds the address of each
* OPEN tag, and to make things simpler for the 0 index the
* start of the program - this is used later for offsets */
Newxz(RExC_open_parens, RExC_parens_buf_size,
regnode_offset);
RExC_open_parens[0] = 1; /* +1 for REG_MAGIC */
/* setup RExC_close_parens, which holds the address of each
* CLOSE tag, and to make things simpler for the 0 index
* the end of the program - this is used later for offsets
* */
Newxz(RExC_close_parens, RExC_parens_buf_size,
regnode_offset);
/* we dont know where end op starts yet, so we dont need to
* set RExC_close_parens[0] like we do RExC_open_parens[0]
* above */
}
else if (RExC_npar > RExC_parens_buf_size) {
I32 old_size = RExC_parens_buf_size;
RExC_parens_buf_size *= 2;
Renew(RExC_open_parens, RExC_parens_buf_size,
regnode_offset);
Zero(RExC_open_parens + old_size,
RExC_parens_buf_size - old_size, regnode_offset);
Renew(RExC_close_parens, RExC_parens_buf_size,
regnode_offset);
Zero(RExC_close_parens + old_size,
RExC_parens_buf_size - old_size, regnode_offset);
}
}
ret = reganode(pRExC_state, OPEN, parno);
if (!RExC_nestroot)
RExC_nestroot = parno;
if (RExC_open_parens && !RExC_open_parens[parno])
{
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Setting open paren #%" IVdf " to %d\n",
22, "| |", (int)(depth * 2 + 1), "",
(IV)parno, ret));
RExC_open_parens[parno]= ret;
}
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
Set_Node_Offset(REGNODE_p(ret), RExC_parse); /* MJD */
is_open = 1;
} else {
/* with RXf_PMf_NOCAPTURE treat (...) as (?:...) */
paren = ':';
ret = 0;
}
}
}
else /* ! paren */
ret = 0;
parse_rest:
/* Pick up the branches, linking them together. */
parse_start = RExC_parse; /* MJD */
br = regbranch(pRExC_state, &flags, 1, depth+1);
/* branch_len = (paren != 0); */
if (br == 0) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf, (UV) flags);
}
if (*RExC_parse == '|') {
if (RExC_use_BRANCHJ) {
reginsert(pRExC_state, BRANCHJ, br, depth+1);
}
else { /* MJD */
reginsert(pRExC_state, BRANCH, br, depth+1);
Set_Node_Length(REGNODE_p(br), paren != 0);
Set_Node_Offset_To_R(br, parse_start-RExC_start);
}
have_branch = 1;
}
else if (paren == ':') {
*flagp |= flags&SIMPLE;
}
if (is_open) { /* Starts with OPEN. */
if (! REGTAIL(pRExC_state, ret, br)) { /* OPEN -> first. */
REQUIRE_BRANCHJ(flagp, 0);
}
}
else if (paren != '?') /* Not Conditional */
ret = br;
*flagp |= flags & (SPSTART | HASWIDTH | POSTPONED);
lastbr = br;
while (*RExC_parse == '|') {
if (RExC_use_BRANCHJ) {
bool shut_gcc_up;
ender = reganode(pRExC_state, LONGJMP, 0);
/* Append to the previous. */
shut_gcc_up = REGTAIL(pRExC_state,
REGNODE_OFFSET(NEXTOPER(NEXTOPER(REGNODE_p(lastbr)))),
ender);
PERL_UNUSED_VAR(shut_gcc_up);
}
nextchar(pRExC_state);
if (freeze_paren) {
if (RExC_npar > after_freeze)
after_freeze = RExC_npar;
RExC_npar = freeze_paren;
}
br = regbranch(pRExC_state, &flags, 0, depth+1);
if (br == 0) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf, (UV) flags);
}
if (! REGTAIL(pRExC_state, lastbr, br)) { /* BRANCH -> BRANCH. */
REQUIRE_BRANCHJ(flagp, 0);
}
lastbr = br;
*flagp |= flags & (SPSTART | HASWIDTH | POSTPONED);
}
if (have_branch || paren != ':') {
regnode * br;
/* Make a closing node, and hook it on the end. */
switch (paren) {
case ':':
ender = reg_node(pRExC_state, TAIL);
break;
case 1: case 2:
ender = reganode(pRExC_state, CLOSE, parno);
if ( RExC_close_parens ) {
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Setting close paren #%" IVdf " to %d\n",
22, "| |", (int)(depth * 2 + 1), "",
(IV)parno, ender));
RExC_close_parens[parno]= ender;
if (RExC_nestroot == parno)
RExC_nestroot = 0;
}
Set_Node_Offset(REGNODE_p(ender), RExC_parse+1); /* MJD */
Set_Node_Length(REGNODE_p(ender), 1); /* MJD */
break;
case 's':
ender = reg_node(pRExC_state, SRCLOSE);
RExC_in_script_run = 0;
break;
case '<':
case 'a':
case 'A':
case 'b':
case 'B':
case ',':
case '=':
case '!':
*flagp &= ~HASWIDTH;
/* FALLTHROUGH */
case 't': /* aTomic */
case '>':
ender = reg_node(pRExC_state, SUCCEED);
break;
case 0:
ender = reg_node(pRExC_state, END);
assert(!RExC_end_op); /* there can only be one! */
RExC_end_op = REGNODE_p(ender);
if (RExC_close_parens) {
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Setting close paren #0 (END) to %d\n",
22, "| |", (int)(depth * 2 + 1), "",
ender));
RExC_close_parens[0]= ender;
}
break;
}
DEBUG_PARSE_r({
DEBUG_PARSE_MSG("lsbr");
regprop(RExC_rx, RExC_mysv1, REGNODE_p(lastbr), NULL, pRExC_state);
regprop(RExC_rx, RExC_mysv2, REGNODE_p(ender), NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ tying lastbr %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n",
SvPV_nolen_const(RExC_mysv1),
(IV)lastbr,
SvPV_nolen_const(RExC_mysv2),
(IV)ender,
(IV)(ender - lastbr)
);
});
if (! REGTAIL(pRExC_state, lastbr, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (have_branch) {
char is_nothing= 1;
if (depth==1)
RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN;
/* Hook the tails of the branches to the closing node. */
for (br = REGNODE_p(ret); br; br = regnext(br)) {
const U8 op = PL_regkind[OP(br)];
if (op == BRANCH) {
if (! REGTAIL_STUDY(pRExC_state,
REGNODE_OFFSET(NEXTOPER(br)),
ender))
{
REQUIRE_BRANCHJ(flagp, 0);
}
if ( OP(NEXTOPER(br)) != NOTHING
|| regnext(NEXTOPER(br)) != REGNODE_p(ender))
is_nothing= 0;
}
else if (op == BRANCHJ) {
bool shut_gcc_up = REGTAIL_STUDY(pRExC_state,
REGNODE_OFFSET(NEXTOPER(NEXTOPER(br))),
ender);
PERL_UNUSED_VAR(shut_gcc_up);
/* for now we always disable this optimisation * /
if ( OP(NEXTOPER(NEXTOPER(br))) != NOTHING
|| regnext(NEXTOPER(NEXTOPER(br))) != REGNODE_p(ender))
*/
is_nothing= 0;
}
}
if (is_nothing) {
regnode * ret_as_regnode = REGNODE_p(ret);
br= PL_regkind[OP(ret_as_regnode)] != BRANCH
? regnext(ret_as_regnode)
: ret_as_regnode;
DEBUG_PARSE_r({
DEBUG_PARSE_MSG("NADA");
regprop(RExC_rx, RExC_mysv1, ret_as_regnode,
NULL, pRExC_state);
regprop(RExC_rx, RExC_mysv2, REGNODE_p(ender),
NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ converting ret %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n",
SvPV_nolen_const(RExC_mysv1),
(IV)REG_NODE_NUM(ret_as_regnode),
SvPV_nolen_const(RExC_mysv2),
(IV)ender,
(IV)(ender - ret)
);
});
OP(br)= NOTHING;
if (OP(REGNODE_p(ender)) == TAIL) {
NEXT_OFF(br)= 0;
RExC_emit= REGNODE_OFFSET(br) + 1;
} else {
regnode *opt;
for ( opt= br + 1; opt < REGNODE_p(ender) ; opt++ )
OP(opt)= OPTIMIZED;
NEXT_OFF(br)= REGNODE_p(ender) - br;
}
}
}
}
{
const char *p;
/* Even/odd or x=don't care: 010101x10x */
static const char parens[] = "=!aA<,>Bbt";
/* flag below is set to 0 up through 'A'; 1 for larger */
if (paren && (p = strchr(parens, paren))) {
U8 node = ((p - parens) % 2) ? UNLESSM : IFMATCH;
int flag = (p - parens) > 3;
if (paren == '>' || paren == 't') {
node = SUSPEND, flag = 0;
}
reginsert(pRExC_state, node, ret, depth+1);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
Set_Node_Offset(REGNODE_p(ret), parse_start + 1);
FLAGS(REGNODE_p(ret)) = flag;
if (! REGTAIL_STUDY(pRExC_state, ret, reg_node(pRExC_state, TAIL)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
}
}
/* Check for proper termination. */
if (paren) {
/* restore original flags, but keep (?p) and, if we've encountered
* something in the parse that changes /d rules into /u, keep the /u */
RExC_flags = oregflags | (RExC_flags & RXf_PMf_KEEPCOPY);
if (DEPENDS_SEMANTICS && RExC_uni_semantics) {
set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET);
}
if (RExC_parse >= RExC_end || UCHARAT(RExC_parse) != ')') {
RExC_parse = oregcomp_parse;
vFAIL("Unmatched (");
}
nextchar(pRExC_state);
}
else if (!paren && RExC_parse < RExC_end) {
if (*RExC_parse == ')') {
RExC_parse++;
vFAIL("Unmatched )");
}
else
FAIL("Junk on end of regexp"); /* "Can't happen". */
NOT_REACHED; /* NOTREACHED */
}
if (RExC_in_lookbehind) {
RExC_in_lookbehind--;
}
if (after_freeze > RExC_npar)
RExC_npar = after_freeze;
return(ret);
}
/*
- regbranch - one alternative of an | operator
*
* Implements the concatenation operator.
*
* On success, returns the offset at which any next node should be placed into
* the regex engine program being compiled.
*
* Returns 0 otherwise, setting flagp to RESTART_PARSE if the parse needs
* to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to
* UTF-8
*/
STATIC regnode_offset
S_regbranch(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, I32 first, U32 depth)
{
regnode_offset ret;
regnode_offset chain = 0;
regnode_offset latest;
I32 flags = 0, c = 0;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGBRANCH;
DEBUG_PARSE("brnc");
if (first)
ret = 0;
else {
if (RExC_use_BRANCHJ)
ret = reganode(pRExC_state, BRANCHJ, 0);
else {
ret = reg_node(pRExC_state, BRANCH);
Set_Node_Length(REGNODE_p(ret), 1);
}
}
*flagp = WORST; /* Tentatively. */
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
while (RExC_parse < RExC_end && *RExC_parse != '|' && *RExC_parse != ')') {
flags &= ~TRYAGAIN;
latest = regpiece(pRExC_state, &flags, depth+1);
if (latest == 0) {
if (flags & TRYAGAIN)
continue;
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regpiece returned failure, flags=%#" UVxf, (UV) flags);
}
else if (ret == 0)
ret = latest;
*flagp |= flags&(HASWIDTH|POSTPONED);
if (chain == 0) /* First piece. */
*flagp |= flags&SPSTART;
else {
/* FIXME adding one for every branch after the first is probably
* excessive now we have TRIE support. (hv) */
MARK_NAUGHTY(1);
if (! REGTAIL(pRExC_state, chain, latest)) {
/* XXX We could just redo this branch, but figuring out what
* bookkeeping needs to be reset is a pain, and it's likely
* that other branches that goto END will also be too large */
REQUIRE_BRANCHJ(flagp, 0);
}
}
chain = latest;
c++;
}
if (chain == 0) { /* Loop ran zero times. */
chain = reg_node(pRExC_state, NOTHING);
if (ret == 0)
ret = chain;
}
if (c == 1) {
*flagp |= flags&SIMPLE;
}
return ret;
}
/*
- regpiece - something followed by possible quantifier * + ? {n,m}
*
* Note that the branching code sequences used for ? and the general cases
* of * and + are somewhat optimized: they use the same NOTHING node as
* both the endmarker for their branch list and the body of the last branch.
* It might seem that this node could be dispensed with entirely, but the
* endmarker role is not redundant.
*
* On success, returns the offset at which any next node should be placed into
* the regex engine program being compiled.
*
* Returns 0 otherwise, with *flagp set to indicate why:
* TRYAGAIN if regatom() returns 0 with TRYAGAIN.
* RESTART_PARSE if the parse needs to be restarted, or'd with
* NEED_UTF8 if the pattern needs to be upgraded to UTF-8.
*/
STATIC regnode_offset
S_regpiece(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth)
{
regnode_offset ret;
char op;
char *next;
I32 flags;
const char * const origparse = RExC_parse;
I32 min;
I32 max = REG_INFTY;
#ifdef RE_TRACK_PATTERN_OFFSETS
char *parse_start;
#endif
const char *maxpos = NULL;
UV uv;
/* Save the original in case we change the emitted regop to a FAIL. */
const regnode_offset orig_emit = RExC_emit;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGPIECE;
DEBUG_PARSE("piec");
ret = regatom(pRExC_state, &flags, depth+1);
if (ret == 0) {
RETURN_FAIL_ON_RESTART_OR_FLAGS(flags, flagp, TRYAGAIN);
FAIL2("panic: regatom returned failure, flags=%#" UVxf, (UV) flags);
}
op = *RExC_parse;
if (op == '{' && regcurly(RExC_parse)) {
maxpos = NULL;
#ifdef RE_TRACK_PATTERN_OFFSETS
parse_start = RExC_parse; /* MJD */
#endif
next = RExC_parse + 1;
while (isDIGIT(*next) || *next == ',') {
if (*next == ',') {
if (maxpos)
break;
else
maxpos = next;
}
next++;
}
if (*next == '}') { /* got one */
const char* endptr;
if (!maxpos)
maxpos = next;
RExC_parse++;
if (isDIGIT(*RExC_parse)) {
endptr = RExC_end;
if (!grok_atoUV(RExC_parse, &uv, &endptr))
vFAIL("Invalid quantifier in {,}");
if (uv >= REG_INFTY)
vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1);
min = (I32)uv;
} else {
min = 0;
}
if (*maxpos == ',')
maxpos++;
else
maxpos = RExC_parse;
if (isDIGIT(*maxpos)) {
endptr = RExC_end;
if (!grok_atoUV(maxpos, &uv, &endptr))
vFAIL("Invalid quantifier in {,}");
if (uv >= REG_INFTY)
vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1);
max = (I32)uv;
} else {
max = REG_INFTY; /* meaning "infinity" */
}
RExC_parse = next;
nextchar(pRExC_state);
if (max < min) { /* If can't match, warn and optimize to fail
unconditionally */
reginsert(pRExC_state, OPFAIL, orig_emit, depth+1);
ckWARNreg(RExC_parse, "Quantifier {n,m} with n > m can't match");
NEXT_OFF(REGNODE_p(orig_emit)) =
regarglen[OPFAIL] + NODE_STEP_REGNODE;
return ret;
}
else if (min == max && *RExC_parse == '?')
{
ckWARN2reg(RExC_parse + 1,
"Useless use of greediness modifier '%c'",
*RExC_parse);
}
do_curly:
if ((flags&SIMPLE)) {
if (min == 0 && max == REG_INFTY) {
reginsert(pRExC_state, STAR, ret, depth+1);
MARK_NAUGHTY(4);
RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN;
goto nest_check;
}
if (min == 1 && max == REG_INFTY) {
reginsert(pRExC_state, PLUS, ret, depth+1);
MARK_NAUGHTY(3);
RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN;
goto nest_check;
}
MARK_NAUGHTY_EXP(2, 2);
reginsert(pRExC_state, CURLY, ret, depth+1);
Set_Node_Offset(REGNODE_p(ret), parse_start+1); /* MJD */
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
}
else {
const regnode_offset w = reg_node(pRExC_state, WHILEM);
FLAGS(REGNODE_p(w)) = 0;
if (! REGTAIL(pRExC_state, ret, w)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (RExC_use_BRANCHJ) {
reginsert(pRExC_state, LONGJMP, ret, depth+1);
reginsert(pRExC_state, NOTHING, ret, depth+1);
NEXT_OFF(REGNODE_p(ret)) = 3; /* Go over LONGJMP. */
}
reginsert(pRExC_state, CURLYX, ret, depth+1);
/* MJD hk */
Set_Node_Offset(REGNODE_p(ret), parse_start+1);
Set_Node_Length(REGNODE_p(ret),
op == '{' ? (RExC_parse - parse_start) : 1);
if (RExC_use_BRANCHJ)
NEXT_OFF(REGNODE_p(ret)) = 3; /* Go over NOTHING to
LONGJMP. */
if (! REGTAIL(pRExC_state, ret, reg_node(pRExC_state,
NOTHING)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
RExC_whilem_seen++;
MARK_NAUGHTY_EXP(1, 4); /* compound interest */
}
FLAGS(REGNODE_p(ret)) = 0;
if (min > 0)
*flagp = WORST;
if (max > 0)
*flagp |= HASWIDTH;
ARG1_SET(REGNODE_p(ret), (U16)min);
ARG2_SET(REGNODE_p(ret), (U16)max);
if (max == REG_INFTY)
RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN;
goto nest_check;
}
}
if (!ISMULT1(op)) {
*flagp = flags;
return(ret);
}
#if 0 /* Now runtime fix should be reliable. */
/* if this is reinstated, don't forget to put this back into perldiag:
=item Regexp *+ operand could be empty at {#} in regex m/%s/
(F) The part of the regexp subject to either the * or + quantifier
could match an empty string. The {#} shows in the regular
expression about where the problem was discovered.
*/
if (!(flags&HASWIDTH) && op != '?')
vFAIL("Regexp *+ operand could be empty");
#endif
#ifdef RE_TRACK_PATTERN_OFFSETS
parse_start = RExC_parse;
#endif
nextchar(pRExC_state);
*flagp = (op != '+') ? (WORST|SPSTART|HASWIDTH) : (WORST|HASWIDTH);
if (op == '*') {
min = 0;
goto do_curly;
}
else if (op == '+') {
min = 1;
goto do_curly;
}
else if (op == '?') {
min = 0; max = 1;
goto do_curly;
}
nest_check:
if (!(flags&(HASWIDTH|POSTPONED)) && max > REG_INFTY/3) {
ckWARN2reg(RExC_parse,
"%" UTF8f " matches null string many times",
UTF8fARG(UTF, (RExC_parse >= origparse
? RExC_parse - origparse
: 0),
origparse));
}
if (*RExC_parse == '?') {
nextchar(pRExC_state);
reginsert(pRExC_state, MINMOD, ret, depth+1);
if (! REGTAIL(pRExC_state, ret, ret + NODE_STEP_REGNODE)) {
REQUIRE_BRANCHJ(flagp, 0);
}
}
else if (*RExC_parse == '+') {
regnode_offset ender;
nextchar(pRExC_state);
ender = reg_node(pRExC_state, SUCCEED);
if (! REGTAIL(pRExC_state, ret, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
reginsert(pRExC_state, SUSPEND, ret, depth+1);
ender = reg_node(pRExC_state, TAIL);
if (! REGTAIL(pRExC_state, ret, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
}
if (ISMULT2(RExC_parse)) {
RExC_parse++;
vFAIL("Nested quantifiers");
}
return(ret);
}
STATIC bool
S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state,
regnode_offset * node_p,
UV * code_point_p,
int * cp_count,
I32 * flagp,
const bool strict,
const U32 depth
)
{
/* This routine teases apart the various meanings of \N and returns
* accordingly. The input parameters constrain which meaning(s) is/are valid
* in the current context.
*
* Exactly one of <node_p> and <code_point_p> must be non-NULL.
*
* If <code_point_p> is not NULL, the context is expecting the result to be a
* single code point. If this \N instance turns out to a single code point,
* the function returns TRUE and sets *code_point_p to that code point.
*
* If <node_p> is not NULL, the context is expecting the result to be one of
* the things representable by a regnode. If this \N instance turns out to be
* one such, the function generates the regnode, returns TRUE and sets *node_p
* to point to the offset of that regnode into the regex engine program being
* compiled.
*
* If this instance of \N isn't legal in any context, this function will
* generate a fatal error and not return.
*
* On input, RExC_parse should point to the first char following the \N at the
* time of the call. On successful return, RExC_parse will have been updated
* to point to just after the sequence identified by this routine. Also
* *flagp has been updated as needed.
*
* When there is some problem with the current context and this \N instance,
* the function returns FALSE, without advancing RExC_parse, nor setting
* *node_p, nor *code_point_p, nor *flagp.
*
* If <cp_count> is not NULL, the caller wants to know the length (in code
* points) that this \N sequence matches. This is set, and the input is
* parsed for errors, even if the function returns FALSE, as detailed below.
*
* There are 6 possibilities here, as detailed in the next 6 paragraphs.
*
* Probably the most common case is for the \N to specify a single code point.
* *cp_count will be set to 1, and *code_point_p will be set to that code
* point.
*
* Another possibility is for the input to be an empty \N{}. This is no
* longer accepted, and will generate a fatal error.
*
* Another possibility is for a custom charnames handler to be in effect which
* translates the input name to an empty string. *cp_count will be set to 0.
* *node_p will be set to a generated NOTHING node.
*
* Still another possibility is for the \N to mean [^\n]. *cp_count will be
* set to 0. *node_p will be set to a generated REG_ANY node.
*
* The fifth possibility is that \N resolves to a sequence of more than one
* code points. *cp_count will be set to the number of code points in the
* sequence. *node_p will be set to a generated node returned by this
* function calling S_reg().
*
* The final possibility is that it is premature to be calling this function;
* the parse needs to be restarted. This can happen when this changes from
* /d to /u rules, or when the pattern needs to be upgraded to UTF-8. The
* latter occurs only when the fifth possibility would otherwise be in
* effect, and is because one of those code points requires the pattern to be
* recompiled as UTF-8. The function returns FALSE, and sets the
* RESTART_PARSE and NEED_UTF8 flags in *flagp, as appropriate. When this
* happens, the caller needs to desist from continuing parsing, and return
* this information to its caller. This is not set for when there is only one
* code point, as this can be called as part of an ANYOF node, and they can
* store above-Latin1 code points without the pattern having to be in UTF-8.
*
* For non-single-quoted regexes, the tokenizer has resolved character and
* sequence names inside \N{...} into their Unicode values, normalizing the
* result into what we should see here: '\N{U+c1.c2...}', where c1... are the
* hex-represented code points in the sequence. This is done there because
* the names can vary based on what charnames pragma is in scope at the time,
* so we need a way to take a snapshot of what they resolve to at the time of
* the original parse. [perl #56444].
*
* That parsing is skipped for single-quoted regexes, so here we may get
* '\N{NAME}', which is parsed now. If the single-quoted regex is something
* like '\N{U+41}', that code point is Unicode, and has to be translated into
* the native character set for non-ASCII platforms. The other possibilities
* are already native, so no translation is done. */
char * endbrace; /* points to '}' following the name */
char* p = RExC_parse; /* Temporary */
SV * substitute_parse = NULL;
char *orig_end;
char *save_start;
I32 flags;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_GROK_BSLASH_N;
GET_RE_DEBUG_FLAGS;
assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */
assert(! (node_p && cp_count)); /* At most 1 should be set */
if (cp_count) { /* Initialize return for the most common case */
*cp_count = 1;
}
/* The [^\n] meaning of \N ignores spaces and comments under the /x
* modifier. The other meanings do not, so use a temporary until we find
* out which we are being called with */
skip_to_be_ignored_text(pRExC_state, &p,
FALSE /* Don't force to /x */ );
/* Disambiguate between \N meaning a named character versus \N meaning
* [^\n]. The latter is assumed when the {...} following the \N is a legal
* quantifier, or if there is no '{' at all */
if (*p != '{' || regcurly(p)) {
RExC_parse = p;
if (cp_count) {
*cp_count = -1;
}
if (! node_p) {
return FALSE;
}
*node_p = reg_node(pRExC_state, REG_ANY);
*flagp |= HASWIDTH|SIMPLE;
MARK_NAUGHTY(1);
Set_Node_Length(REGNODE_p(*(node_p)), 1); /* MJD */
return TRUE;
}
/* The test above made sure that the next real character is a '{', but
* under the /x modifier, it could be separated by space (or a comment and
* \n) and this is not allowed (for consistency with \x{...} and the
* tokenizer handling of \N{NAME}). */
if (*RExC_parse != '{') {
vFAIL("Missing braces on \\N{}");
}
RExC_parse++; /* Skip past the '{' */
endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
if (! endbrace) { /* no trailing brace */
vFAIL2("Missing right brace on \\%c{}", 'N');
}
/* Here, we have decided it should be a named character or sequence. These
* imply Unicode semantics */
REQUIRE_UNI_RULES(flagp, FALSE);
/* \N{_} is what toke.c returns to us to indicate a name that evaluates to
* nothing at all (not allowed under strict) */
if (endbrace - RExC_parse == 1 && *RExC_parse == '_') {
RExC_parse = endbrace;
if (strict) {
RExC_parse++; /* Position after the "}" */
vFAIL("Zero length \\N{}");
}
if (cp_count) {
*cp_count = 0;
}
nextchar(pRExC_state);
if (! node_p) {
return FALSE;
}
*node_p = reg_node(pRExC_state, NOTHING);
return TRUE;
}
if (endbrace - RExC_parse < 2 || ! strBEGINs(RExC_parse, "U+")) {
/* Here, the name isn't of the form U+.... This can happen if the
* pattern is single-quoted, so didn't get evaluated in toke.c. Now
* is the time to find out what the name means */
const STRLEN name_len = endbrace - RExC_parse;
SV * value_sv; /* What does this name evaluate to */
SV ** value_svp;
const U8 * value; /* string of name's value */
STRLEN value_len; /* and its length */
/* RExC_unlexed_names is a hash of names that weren't evaluated by
* toke.c, and their values. Make sure is initialized */
if (! RExC_unlexed_names) {
RExC_unlexed_names = newHV();
}
/* If we have already seen this name in this pattern, use that. This
* allows us to only call the charnames handler once per name per
* pattern. A broken or malicious handler could return something
* different each time, which could cause the results to vary depending
* on if something gets added or subtracted from the pattern that
* causes the number of passes to change, for example */
if ((value_svp = hv_fetch(RExC_unlexed_names, RExC_parse,
name_len, 0)))
{
value_sv = *value_svp;
}
else { /* Otherwise we have to go out and get the name */
const char * error_msg = NULL;
value_sv = get_and_check_backslash_N_name(RExC_parse, endbrace,
UTF,
&error_msg);
if (error_msg) {
RExC_parse = endbrace;
vFAIL(error_msg);
}
/* If no error message, should have gotten a valid return */
assert (value_sv);
/* Save the name's meaning for later use */
if (! hv_store(RExC_unlexed_names, RExC_parse, name_len,
value_sv, 0))
{
Perl_croak(aTHX_ "panic: hv_store() unexpectedly failed");
}
}
/* Here, we have the value the name evaluates to in 'value_sv' */
value = (U8 *) SvPV(value_sv, value_len);
/* See if the result is one code point vs 0 or multiple */
if (value_len > 0 && value_len <= (UV) ((SvUTF8(value_sv))
? UTF8SKIP(value)
: 1))
{
/* Here, exactly one code point. If that isn't what is wanted,
* fail */
if (! code_point_p) {
RExC_parse = p;
return FALSE;
}
/* Convert from string to numeric code point */
*code_point_p = (SvUTF8(value_sv))
? valid_utf8_to_uvchr(value, NULL)
: *value;
/* Have parsed this entire single code point \N{...}. *cp_count
* has already been set to 1, so don't do it again. */
RExC_parse = endbrace;
nextchar(pRExC_state);
return TRUE;
} /* End of is a single code point */
/* Count the code points, if caller desires. The API says to do this
* even if we will later return FALSE */
if (cp_count) {
*cp_count = 0;
*cp_count = (SvUTF8(value_sv))
? utf8_length(value, value + value_len)
: value_len;
}
/* Fail if caller doesn't want to handle a multi-code-point sequence.
* But don't back the pointer up if the caller wants to know how many
* code points there are (they need to handle it themselves in this
* case). */
if (! node_p) {
if (! cp_count) {
RExC_parse = p;
}
return FALSE;
}
/* Convert this to a sub-pattern of the form "(?: ... )", and then call
* reg recursively to parse it. That way, it retains its atomicness,
* while not having to worry about any special handling that some code
* points may have. */
substitute_parse = newSVpvs("?:");
sv_catsv(substitute_parse, value_sv);
sv_catpv(substitute_parse, ")");
#ifdef EBCDIC
/* The value should already be native, so no need to convert on EBCDIC
* platforms.*/
assert(! RExC_recode_x_to_native);
#endif
}
else { /* \N{U+...} */
Size_t count = 0; /* code point count kept internally */
/* We can get to here when the input is \N{U+...} or when toke.c has
* converted a name to the \N{U+...} form. This include changing a
* name that evaluates to multiple code points to \N{U+c1.c2.c3 ...} */
RExC_parse += 2; /* Skip past the 'U+' */
/* Code points are separated by dots. The '}' terminates the whole
* thing. */
do { /* Loop until the ending brace */
UV cp = 0;
char * start_digit; /* The first of the current code point */
if (! isXDIGIT(*RExC_parse)) {
RExC_parse++;
vFAIL("Invalid hexadecimal number in \\N{U+...}");
}
start_digit = RExC_parse;
count++;
/* Loop through the hex digits of the current code point */
do {
/* Adding this digit will shift the result 4 bits. If that
* result would be above the legal max, it's overflow */
if (cp > MAX_LEGAL_CP >> 4) {
/* Find the end of the code point */
do {
RExC_parse ++;
} while (isXDIGIT(*RExC_parse) || *RExC_parse == '_');
/* Be sure to synchronize this message with the similar one
* in utf8.c */
vFAIL4("Use of code point 0x%.*s is not allowed; the"
" permissible max is 0x%" UVxf,
(int) (RExC_parse - start_digit), start_digit,
MAX_LEGAL_CP);
}
/* Accumulate this (valid) digit into the running total */
cp = (cp << 4) + READ_XDIGIT(RExC_parse);
/* READ_XDIGIT advanced the input pointer. Ignore a single
* underscore separator */
if (*RExC_parse == '_' && isXDIGIT(RExC_parse[1])) {
RExC_parse++;
}
} while (isXDIGIT(*RExC_parse));
/* Here, have accumulated the next code point */
if (RExC_parse >= endbrace) { /* If done ... */
if (count != 1) {
goto do_concat;
}
/* Here, is a single code point; fail if doesn't want that */
if (! code_point_p) {
RExC_parse = p;
return FALSE;
}
/* A single code point is easy to handle; just return it */
*code_point_p = UNI_TO_NATIVE(cp);
RExC_parse = endbrace;
nextchar(pRExC_state);
return TRUE;
}
/* Here, the only legal thing would be a multiple character
* sequence (of the form "\N{U+c1.c2. ... }". So the next
* character must be a dot (and the one after that can't be the
* endbrace, or we'd have something like \N{U+100.} ) */
if (*RExC_parse != '.' || RExC_parse + 1 >= endbrace) {
RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */
? UTF8SKIP(RExC_parse)
: 1;
if (RExC_parse >= endbrace) { /* Guard against malformed utf8 */
RExC_parse = endbrace;
}
vFAIL("Invalid hexadecimal number in \\N{U+...}");
}
/* Here, looks like its really a multiple character sequence. Fail
* if that's not what the caller wants. But continue with counting
* and error checking if they still want a count */
if (! node_p && ! cp_count) {
return FALSE;
}
/* What is done here is to convert this to a sub-pattern of the
* form \x{char1}\x{char2}... and then call reg recursively to
* parse it (enclosing in "(?: ... )" ). That way, it retains its
* atomicness, while not having to worry about special handling
* that some code points may have. We don't create a subpattern,
* but go through the motions of code point counting and error
* checking, if the caller doesn't want a node returned. */
if (node_p && count == 1) {
substitute_parse = newSVpvs("?:");
}
do_concat:
if (node_p) {
/* Convert to notation the rest of the code understands */
sv_catpvs(substitute_parse, "\\x{");
sv_catpvn(substitute_parse, start_digit,
RExC_parse - start_digit);
sv_catpvs(substitute_parse, "}");
}
/* Move to after the dot (or ending brace the final time through.)
* */
RExC_parse++;
count++;
} while (RExC_parse < endbrace);
if (! node_p) { /* Doesn't want the node */
assert (cp_count);
*cp_count = count;
return FALSE;
}
sv_catpvs(substitute_parse, ")");
#ifdef EBCDIC
/* The values are Unicode, and therefore have to be converted to native
* on a non-Unicode (meaning non-ASCII) platform. */
RExC_recode_x_to_native = 1;
#endif
}
/* Here, we have the string the name evaluates to, ready to be parsed,
* stored in 'substitute_parse' as a series of valid "\x{...}\x{...}"
* constructs. This can be called from within a substitute parse already.
* The error reporting mechanism doesn't work for 2 levels of this, but the
* code above has validated this new construct, so there should be no
* errors generated by the below. And this isn' an exact copy, so the
* mechanism to seamlessly deal with this won't work, so turn off warnings
* during it */
save_start = RExC_start;
orig_end = RExC_end;
RExC_parse = RExC_start = SvPVX(substitute_parse);
RExC_end = RExC_parse + SvCUR(substitute_parse);
TURN_OFF_WARNINGS_IN_SUBSTITUTE_PARSE;
*node_p = reg(pRExC_state, 1, &flags, depth+1);
/* Restore the saved values */
RESTORE_WARNINGS;
RExC_start = save_start;
RExC_parse = endbrace;
RExC_end = orig_end;
#ifdef EBCDIC
RExC_recode_x_to_native = 0;
#endif
SvREFCNT_dec_NN(substitute_parse);
if (! *node_p) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: reg returned failure to grok_bslash_N, flags=%#" UVxf,
(UV) flags);
}
*flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED);
nextchar(pRExC_state);
return TRUE;
}
PERL_STATIC_INLINE U8
S_compute_EXACTish(RExC_state_t *pRExC_state)
{
U8 op;
PERL_ARGS_ASSERT_COMPUTE_EXACTISH;
if (! FOLD) {
return (LOC)
? EXACTL
: EXACT;
}
op = get_regex_charset(RExC_flags);
if (op >= REGEX_ASCII_RESTRICTED_CHARSET) {
op--; /* /a is same as /u, and map /aa's offset to what /a's would have
been, so there is no hole */
}
return op + EXACTF;
}
STATIC bool
S_new_regcurly(const char *s, const char *e)
{
/* This is a temporary function designed to match the most lenient form of
* a {m,n} quantifier we ever envision, with either number omitted, and
* spaces anywhere between/before/after them.
*
* If this function fails, then the string it matches is very unlikely to
* ever be considered a valid quantifier, so we can allow the '{' that
* begins it to be considered as a literal */
bool has_min = FALSE;
bool has_max = FALSE;
PERL_ARGS_ASSERT_NEW_REGCURLY;
if (s >= e || *s++ != '{')
return FALSE;
while (s < e && isSPACE(*s)) {
s++;
}
while (s < e && isDIGIT(*s)) {
has_min = TRUE;
s++;
}
while (s < e && isSPACE(*s)) {
s++;
}
if (*s == ',') {
s++;
while (s < e && isSPACE(*s)) {
s++;
}
while (s < e && isDIGIT(*s)) {
has_max = TRUE;
s++;
}
while (s < e && isSPACE(*s)) {
s++;
}
}
return s < e && *s == '}' && (has_min || has_max);
}
/* Parse backref decimal value, unless it's too big to sensibly be a backref,
* in which case return I32_MAX (rather than possibly 32-bit wrapping) */
static I32
S_backref_value(char *p, char *e)
{
const char* endptr = e;
UV val;
if (grok_atoUV(p, &val, &endptr) && val <= I32_MAX)
return (I32)val;
return I32_MAX;
}
/*
- regatom - the lowest level
Try to identify anything special at the start of the current parse position.
If there is, then handle it as required. This may involve generating a
single regop, such as for an assertion; or it may involve recursing, such as
to handle a () structure.
If the string doesn't start with something special then we gobble up
as much literal text as we can. If we encounter a quantifier, we have to
back off the final literal character, as that quantifier applies to just it
and not to the whole string of literals.
Once we have been able to handle whatever type of thing started the
sequence, we return the offset into the regex engine program being compiled
at which any next regnode should be placed.
Returns 0, setting *flagp to TRYAGAIN if reg() returns 0 with TRYAGAIN.
Returns 0, setting *flagp to RESTART_PARSE if the parse needs to be
restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8
Otherwise does not return 0.
Note: we have to be careful with escapes, as they can be both literal
and special, and in the case of \10 and friends, context determines which.
A summary of the code structure is:
switch (first_byte) {
cases for each special:
handle this special;
break;
case '\\':
switch (2nd byte) {
cases for each unambiguous special:
handle this special;
break;
cases for each ambigous special/literal:
disambiguate;
if (special) handle here
else goto defchar;
default: // unambiguously literal:
goto defchar;
}
default: // is a literal char
// FALL THROUGH
defchar:
create EXACTish node for literal;
while (more input and node isn't full) {
switch (input_byte) {
cases for each special;
make sure parse pointer is set so that the next call to
regatom will see this special first
goto loopdone; // EXACTish node terminated by prev. char
default:
append char to EXACTISH node;
}
get next input byte;
}
loopdone:
}
return the generated node;
Specifically there are two separate switches for handling
escape sequences, with the one for handling literal escapes requiring
a dummy entry for all of the special escapes that are actually handled
by the other.
*/
STATIC regnode_offset
S_regatom(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth)
{
dVAR;
regnode_offset ret = 0;
I32 flags = 0;
char *parse_start;
U8 op;
int invert = 0;
U8 arg;
GET_RE_DEBUG_FLAGS_DECL;
*flagp = WORST; /* Tentatively. */
DEBUG_PARSE("atom");
PERL_ARGS_ASSERT_REGATOM;
tryagain:
parse_start = RExC_parse;
assert(RExC_parse < RExC_end);
switch ((U8)*RExC_parse) {
case '^':
RExC_seen_zerolen++;
nextchar(pRExC_state);
if (RExC_flags & RXf_PMf_MULTILINE)
ret = reg_node(pRExC_state, MBOL);
else
ret = reg_node(pRExC_state, SBOL);
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
break;
case '$':
nextchar(pRExC_state);
if (*RExC_parse)
RExC_seen_zerolen++;
if (RExC_flags & RXf_PMf_MULTILINE)
ret = reg_node(pRExC_state, MEOL);
else
ret = reg_node(pRExC_state, SEOL);
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
break;
case '.':
nextchar(pRExC_state);
if (RExC_flags & RXf_PMf_SINGLELINE)
ret = reg_node(pRExC_state, SANY);
else
ret = reg_node(pRExC_state, REG_ANY);
*flagp |= HASWIDTH|SIMPLE;
MARK_NAUGHTY(1);
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
break;
case '[':
{
char * const oregcomp_parse = ++RExC_parse;
ret = regclass(pRExC_state, flagp, depth+1,
FALSE, /* means parse the whole char class */
TRUE, /* allow multi-char folds */
FALSE, /* don't silence non-portable warnings. */
(bool) RExC_strict,
TRUE, /* Allow an optimized regnode result */
NULL);
if (ret == 0) {
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
FAIL2("panic: regclass returned failure to regatom, flags=%#" UVxf,
(UV) *flagp);
}
if (*RExC_parse != ']') {
RExC_parse = oregcomp_parse;
vFAIL("Unmatched [");
}
nextchar(pRExC_state);
Set_Node_Length(REGNODE_p(ret), RExC_parse - oregcomp_parse + 1); /* MJD */
break;
}
case '(':
nextchar(pRExC_state);
ret = reg(pRExC_state, 2, &flags, depth+1);
if (ret == 0) {
if (flags & TRYAGAIN) {
if (RExC_parse >= RExC_end) {
/* Make parent create an empty node if needed. */
*flagp |= TRYAGAIN;
return(0);
}
goto tryagain;
}
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: reg returned failure to regatom, flags=%#" UVxf,
(UV) flags);
}
*flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED);
break;
case '|':
case ')':
if (flags & TRYAGAIN) {
*flagp |= TRYAGAIN;
return 0;
}
vFAIL("Internal urp");
/* Supposed to be caught earlier. */
break;
case '?':
case '+':
case '*':
RExC_parse++;
vFAIL("Quantifier follows nothing");
break;
case '\\':
/* Special Escapes
This switch handles escape sequences that resolve to some kind
of special regop and not to literal text. Escape sequences that
resolve to literal text are handled below in the switch marked
"Literal Escapes".
Every entry in this switch *must* have a corresponding entry
in the literal escape switch. However, the opposite is not
required, as the default for this switch is to jump to the
literal text handling code.
*/
RExC_parse++;
switch ((U8)*RExC_parse) {
/* Special Escapes */
case 'A':
RExC_seen_zerolen++;
ret = reg_node(pRExC_state, SBOL);
/* SBOL is shared with /^/ so we set the flags so we can tell
* /\A/ from /^/ in split. */
FLAGS(REGNODE_p(ret)) = 1;
*flagp |= SIMPLE;
goto finish_meta_pat;
case 'G':
ret = reg_node(pRExC_state, GPOS);
RExC_seen |= REG_GPOS_SEEN;
*flagp |= SIMPLE;
goto finish_meta_pat;
case 'K':
RExC_seen_zerolen++;
ret = reg_node(pRExC_state, KEEPS);
*flagp |= SIMPLE;
/* XXX:dmq : disabling in-place substitution seems to
* be necessary here to avoid cases of memory corruption, as
* with: C<$_="x" x 80; s/x\K/y/> -- rgs
*/
RExC_seen |= REG_LOOKBEHIND_SEEN;
goto finish_meta_pat;
case 'Z':
ret = reg_node(pRExC_state, SEOL);
*flagp |= SIMPLE;
RExC_seen_zerolen++; /* Do not optimize RE away */
goto finish_meta_pat;
case 'z':
ret = reg_node(pRExC_state, EOS);
*flagp |= SIMPLE;
RExC_seen_zerolen++; /* Do not optimize RE away */
goto finish_meta_pat;
case 'C':
vFAIL("\\C no longer supported");
case 'X':
ret = reg_node(pRExC_state, CLUMP);
*flagp |= HASWIDTH;
goto finish_meta_pat;
case 'W':
invert = 1;
/* FALLTHROUGH */
case 'w':
arg = ANYOF_WORDCHAR;
goto join_posix;
case 'B':
invert = 1;
/* FALLTHROUGH */
case 'b':
{
U8 flags = 0;
regex_charset charset = get_regex_charset(RExC_flags);
RExC_seen_zerolen++;
RExC_seen |= REG_LOOKBEHIND_SEEN;
op = BOUND + charset;
if (RExC_parse >= RExC_end || *(RExC_parse + 1) != '{') {
flags = TRADITIONAL_BOUND;
if (op > BOUNDA) { /* /aa is same as /a */
op = BOUNDA;
}
}
else {
STRLEN length;
char name = *RExC_parse;
char * endbrace = NULL;
RExC_parse += 2;
endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
if (! endbrace) {
vFAIL2("Missing right brace on \\%c{}", name);
}
/* XXX Need to decide whether to take spaces or not. Should be
* consistent with \p{}, but that currently is SPACE, which
* means vertical too, which seems wrong
* while (isBLANK(*RExC_parse)) {
RExC_parse++;
}*/
if (endbrace == RExC_parse) {
RExC_parse++; /* After the '}' */
vFAIL2("Empty \\%c{}", name);
}
length = endbrace - RExC_parse;
/*while (isBLANK(*(RExC_parse + length - 1))) {
length--;
}*/
switch (*RExC_parse) {
case 'g':
if ( length != 1
&& (memNEs(RExC_parse + 1, length - 1, "cb")))
{
goto bad_bound_type;
}
flags = GCB_BOUND;
break;
case 'l':
if (length != 2 || *(RExC_parse + 1) != 'b') {
goto bad_bound_type;
}
flags = LB_BOUND;
break;
case 's':
if (length != 2 || *(RExC_parse + 1) != 'b') {
goto bad_bound_type;
}
flags = SB_BOUND;
break;
case 'w':
if (length != 2 || *(RExC_parse + 1) != 'b') {
goto bad_bound_type;
}
flags = WB_BOUND;
break;
default:
bad_bound_type:
RExC_parse = endbrace;
vFAIL2utf8f(
"'%" UTF8f "' is an unknown bound type",
UTF8fARG(UTF, length, endbrace - length));
NOT_REACHED; /*NOTREACHED*/
}
RExC_parse = endbrace;
REQUIRE_UNI_RULES(flagp, 0);
if (op == BOUND) {
op = BOUNDU;
}
else if (op >= BOUNDA) { /* /aa is same as /a */
op = BOUNDU;
length += 4;
/* Don't have to worry about UTF-8, in this message because
* to get here the contents of the \b must be ASCII */
ckWARN4reg(RExC_parse + 1, /* Include the '}' in msg */
"Using /u for '%.*s' instead of /%s",
(unsigned) length,
endbrace - length + 1,
(charset == REGEX_ASCII_RESTRICTED_CHARSET)
? ASCII_RESTRICT_PAT_MODS
: ASCII_MORE_RESTRICT_PAT_MODS);
}
}
if (op == BOUND) {
RExC_seen_d_op = TRUE;
}
else if (op == BOUNDL) {
RExC_contains_locale = 1;
}
if (invert) {
op += NBOUND - BOUND;
}
ret = reg_node(pRExC_state, op);
FLAGS(REGNODE_p(ret)) = flags;
*flagp |= SIMPLE;
goto finish_meta_pat;
}
case 'D':
invert = 1;
/* FALLTHROUGH */
case 'd':
arg = ANYOF_DIGIT;
if (! DEPENDS_SEMANTICS) {
goto join_posix;
}
/* \d doesn't have any matches in the upper Latin1 range, hence /d
* is equivalent to /u. Changing to /u saves some branches at
* runtime */
op = POSIXU;
goto join_posix_op_known;
case 'R':
ret = reg_node(pRExC_state, LNBREAK);
*flagp |= HASWIDTH|SIMPLE;
goto finish_meta_pat;
case 'H':
invert = 1;
/* FALLTHROUGH */
case 'h':
arg = ANYOF_BLANK;
op = POSIXU;
goto join_posix_op_known;
case 'V':
invert = 1;
/* FALLTHROUGH */
case 'v':
arg = ANYOF_VERTWS;
op = POSIXU;
goto join_posix_op_known;
case 'S':
invert = 1;
/* FALLTHROUGH */
case 's':
arg = ANYOF_SPACE;
join_posix:
op = POSIXD + get_regex_charset(RExC_flags);
if (op > POSIXA) { /* /aa is same as /a */
op = POSIXA;
}
else if (op == POSIXL) {
RExC_contains_locale = 1;
}
else if (op == POSIXD) {
RExC_seen_d_op = TRUE;
}
join_posix_op_known:
if (invert) {
op += NPOSIXD - POSIXD;
}
ret = reg_node(pRExC_state, op);
FLAGS(REGNODE_p(ret)) = namedclass_to_classnum(arg);
*flagp |= HASWIDTH|SIMPLE;
/* FALLTHROUGH */
finish_meta_pat:
if ( UCHARAT(RExC_parse + 1) == '{'
&& UNLIKELY(! new_regcurly(RExC_parse + 1, RExC_end)))
{
RExC_parse += 2;
vFAIL("Unescaped left brace in regex is illegal here");
}
nextchar(pRExC_state);
Set_Node_Length(REGNODE_p(ret), 2); /* MJD */
break;
case 'p':
case 'P':
RExC_parse--;
ret = regclass(pRExC_state, flagp, depth+1,
TRUE, /* means just parse this element */
FALSE, /* don't allow multi-char folds */
FALSE, /* don't silence non-portable warnings. It
would be a bug if these returned
non-portables */
(bool) RExC_strict,
TRUE, /* Allow an optimized regnode result */
NULL);
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if
* multi-char folds are allowed. */
if (!ret)
FAIL2("panic: regclass returned failure to regatom, flags=%#" UVxf,
(UV) *flagp);
RExC_parse--;
Set_Node_Offset(REGNODE_p(ret), parse_start);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start - 2);
nextchar(pRExC_state);
break;
case 'N':
/* Handle \N, \N{} and \N{NAMED SEQUENCE} (the latter meaning the
* \N{...} evaluates to a sequence of more than one code points).
* The function call below returns a regnode, which is our result.
* The parameters cause it to fail if the \N{} evaluates to a
* single code point; we handle those like any other literal. The
* reason that the multicharacter case is handled here and not as
* part of the EXACtish code is because of quantifiers. In
* /\N{BLAH}+/, the '+' applies to the whole thing, and doing it
* this way makes that Just Happen. dmq.
* join_exact() will join this up with adjacent EXACTish nodes
* later on, if appropriate. */
++RExC_parse;
if (grok_bslash_N(pRExC_state,
&ret, /* Want a regnode returned */
NULL, /* Fail if evaluates to a single code
point */
NULL, /* Don't need a count of how many code
points */
flagp,
RExC_strict,
depth)
) {
break;
}
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
/* Here, evaluates to a single code point. Go get that */
RExC_parse = parse_start;
goto defchar;
case 'k': /* Handle \k<NAME> and \k'NAME' */
parse_named_seq:
{
char ch;
if ( RExC_parse >= RExC_end - 1
|| (( ch = RExC_parse[1]) != '<'
&& ch != '\''
&& ch != '{'))
{
RExC_parse++;
/* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */
vFAIL2("Sequence %.2s... not terminated", parse_start);
} else {
RExC_parse += 2;
ret = handle_named_backref(pRExC_state,
flagp,
parse_start,
(ch == '<')
? '>'
: (ch == '{')
? '}'
: '\'');
}
break;
}
case 'g':
case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
{
I32 num;
bool hasbrace = 0;
if (*RExC_parse == 'g') {
bool isrel = 0;
RExC_parse++;
if (*RExC_parse == '{') {
RExC_parse++;
hasbrace = 1;
}
if (*RExC_parse == '-') {
RExC_parse++;
isrel = 1;
}
if (hasbrace && !isDIGIT(*RExC_parse)) {
if (isrel) RExC_parse--;
RExC_parse -= 2;
goto parse_named_seq;
}
if (RExC_parse >= RExC_end) {
goto unterminated_g;
}
num = S_backref_value(RExC_parse, RExC_end);
if (num == 0)
vFAIL("Reference to invalid group 0");
else if (num == I32_MAX) {
if (isDIGIT(*RExC_parse))
vFAIL("Reference to nonexistent group");
else
unterminated_g:
vFAIL("Unterminated \\g... pattern");
}
if (isrel) {
num = RExC_npar - num;
if (num < 1)
vFAIL("Reference to nonexistent or unclosed group");
}
}
else {
num = S_backref_value(RExC_parse, RExC_end);
/* bare \NNN might be backref or octal - if it is larger
* than or equal RExC_npar then it is assumed to be an
* octal escape. Note RExC_npar is +1 from the actual
* number of parens. */
/* Note we do NOT check if num == I32_MAX here, as that is
* handled by the RExC_npar check */
if (
/* any numeric escape < 10 is always a backref */
num > 9
/* any numeric escape < RExC_npar is a backref */
&& num >= RExC_npar
/* cannot be an octal escape if it starts with 8 */
&& *RExC_parse != '8'
/* cannot be an octal escape it it starts with 9 */
&& *RExC_parse != '9'
) {
/* Probably not meant to be a backref, instead likely
* to be an octal character escape, e.g. \35 or \777.
* The above logic should make it obvious why using
* octal escapes in patterns is problematic. - Yves */
RExC_parse = parse_start;
goto defchar;
}
}
/* At this point RExC_parse points at a numeric escape like
* \12 or \88 or something similar, which we should NOT treat
* as an octal escape. It may or may not be a valid backref
* escape. For instance \88888888 is unlikely to be a valid
* backref. */
while (isDIGIT(*RExC_parse))
RExC_parse++;
if (hasbrace) {
if (*RExC_parse != '}')
vFAIL("Unterminated \\g{...} pattern");
RExC_parse++;
}
if (num >= (I32)RExC_npar) {
/* It might be a forward reference; we can't fail until we
* know, by completing the parse to get all the groups, and
* then reparsing */
if (ALL_PARENS_COUNTED) {
if (num >= RExC_total_parens) {
vFAIL("Reference to nonexistent group");
}
}
else {
REQUIRE_PARENS_PASS;
}
}
RExC_sawback = 1;
ret = reganode(pRExC_state,
((! FOLD)
? REF
: (ASCII_FOLD_RESTRICTED)
? REFFA
: (AT_LEAST_UNI_SEMANTICS)
? REFFU
: (LOC)
? REFFL
: REFF),
num);
if (OP(REGNODE_p(ret)) == REFF) {
RExC_seen_d_op = TRUE;
}
*flagp |= HASWIDTH;
/* override incorrect value set in reganode MJD */
Set_Node_Offset(REGNODE_p(ret), parse_start);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start-1);
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
}
break;
case '\0':
if (RExC_parse >= RExC_end)
FAIL("Trailing \\");
/* FALLTHROUGH */
default:
/* Do not generate "unrecognized" warnings here, we fall
back into the quick-grab loop below */
RExC_parse = parse_start;
goto defchar;
} /* end of switch on a \foo sequence */
break;
case '#':
/* '#' comments should have been spaced over before this function was
* called */
assert((RExC_flags & RXf_PMf_EXTENDED) == 0);
/*
if (RExC_flags & RXf_PMf_EXTENDED) {
RExC_parse = reg_skipcomment( pRExC_state, RExC_parse );
if (RExC_parse < RExC_end)
goto tryagain;
}
*/
/* FALLTHROUGH */
default:
defchar: {
/* Here, we have determined that the next thing is probably a
* literal character. RExC_parse points to the first byte of its
* definition. (It still may be an escape sequence that evaluates
* to a single character) */
STRLEN len = 0;
UV ender = 0;
char *p;
char *s;
/* This allows us to fill a node with just enough spare so that if the final
* character folds, its expansion is guaranteed to fit */
#define MAX_NODE_STRING_SIZE (255-UTF8_MAXBYTES_CASE)
char *s0;
U8 upper_parse = MAX_NODE_STRING_SIZE;
/* We start out as an EXACT node, even if under /i, until we find a
* character which is in a fold. The algorithm now segregates into
* separate nodes, characters that fold from those that don't under
* /i. (This hopefully will create nodes that are fixed strings
* even under /i, giving the optimizer something to grab on to.)
* So, if a node has something in it and the next character is in
* the opposite category, that node is closed up, and the function
* returns. Then regatom is called again, and a new node is
* created for the new category. */
U8 node_type = EXACT;
/* Assume the node will be fully used; the excess is given back at
* the end. We can't make any other length assumptions, as a byte
* input sequence could shrink down. */
Ptrdiff_t initial_size = STR_SZ(256);
bool next_is_quantifier;
char * oldp = NULL;
/* We can convert EXACTF nodes to EXACTFU if they contain only
* characters that match identically regardless of the target
* string's UTF8ness. The reason to do this is that EXACTF is not
* trie-able, EXACTFU is, and EXACTFU requires fewer operations at
* runtime.
*
* Similarly, we can convert EXACTFL nodes to EXACTFLU8 if they
* contain only above-Latin1 characters (hence must be in UTF8),
* which don't participate in folds with Latin1-range characters,
* as the latter's folds aren't known until runtime. */
bool maybe_exactfu = FOLD && (DEPENDS_SEMANTICS || LOC);
/* Single-character EXACTish nodes are almost always SIMPLE. This
* allows us to override this as encountered */
U8 maybe_SIMPLE = SIMPLE;
/* Does this node contain something that can't match unless the
* target string is (also) in UTF-8 */
bool requires_utf8_target = FALSE;
/* The sequence 'ss' is problematic in non-UTF-8 patterns. */
bool has_ss = FALSE;
/* So is the MICRO SIGN */
bool has_micro_sign = FALSE;
/* Allocate an EXACT node. The node_type may change below to
* another EXACTish node, but since the size of the node doesn't
* change, it works */
ret = regnode_guts(pRExC_state, node_type, initial_size, "exact");
FILL_NODE(ret, node_type);
RExC_emit++;
s = STRING(REGNODE_p(ret));
s0 = s;
reparse:
/* This breaks under rare circumstances. If folding, we do not
* want to split a node at a character that is a non-final in a
* multi-char fold, as an input string could just happen to want to
* match across the node boundary. The code at the end of the loop
* looks for this, and backs off until it finds not such a
* character, but it is possible (though extremely, extremely
* unlikely) for all characters in the node to be non-final fold
* ones, in which case we just leave the node fully filled, and
* hope that it doesn't match the string in just the wrong place */
assert( ! UTF /* Is at the beginning of a character */
|| UTF8_IS_INVARIANT(UCHARAT(RExC_parse))
|| UTF8_IS_START(UCHARAT(RExC_parse)));
/* Here, we have a literal character. Find the maximal string of
* them in the input that we can fit into a single EXACTish node.
* We quit at the first non-literal or when the node gets full, or
* under /i the categorization of folding/non-folding character
* changes */
for (p = RExC_parse; len < upper_parse && p < RExC_end; ) {
/* In most cases each iteration adds one byte to the output.
* The exceptions override this */
Size_t added_len = 1;
oldp = p;
/* White space has already been ignored */
assert( (RExC_flags & RXf_PMf_EXTENDED) == 0
|| ! is_PATWS_safe((p), RExC_end, UTF));
switch ((U8)*p) {
case '^':
case '$':
case '.':
case '[':
case '(':
case ')':
case '|':
goto loopdone;
case '\\':
/* Literal Escapes Switch
This switch is meant to handle escape sequences that
resolve to a literal character.
Every escape sequence that represents something
else, like an assertion or a char class, is handled
in the switch marked 'Special Escapes' above in this
routine, but also has an entry here as anything that
isn't explicitly mentioned here will be treated as
an unescaped equivalent literal.
*/
switch ((U8)*++p) {
/* These are all the special escapes. */
case 'A': /* Start assertion */
case 'b': case 'B': /* Word-boundary assertion*/
case 'C': /* Single char !DANGEROUS! */
case 'd': case 'D': /* digit class */
case 'g': case 'G': /* generic-backref, pos assertion */
case 'h': case 'H': /* HORIZWS */
case 'k': case 'K': /* named backref, keep marker */
case 'p': case 'P': /* Unicode property */
case 'R': /* LNBREAK */
case 's': case 'S': /* space class */
case 'v': case 'V': /* VERTWS */
case 'w': case 'W': /* word class */
case 'X': /* eXtended Unicode "combining
character sequence" */
case 'z': case 'Z': /* End of line/string assertion */
--p;
goto loopdone;
/* Anything after here is an escape that resolves to a
literal. (Except digits, which may or may not)
*/
case 'n':
ender = '\n';
p++;
break;
case 'N': /* Handle a single-code point named character. */
RExC_parse = p + 1;
if (! grok_bslash_N(pRExC_state,
NULL, /* Fail if evaluates to
anything other than a
single code point */
&ender, /* The returned single code
point */
NULL, /* Don't need a count of
how many code points */
flagp,
RExC_strict,
depth)
) {
if (*flagp & NEED_UTF8)
FAIL("panic: grok_bslash_N set NEED_UTF8");
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
/* Here, it wasn't a single code point. Go close
* up this EXACTish node. The switch() prior to
* this switch handles the other cases */
RExC_parse = p = oldp;
goto loopdone;
}
p = RExC_parse;
RExC_parse = parse_start;
/* The \N{} means the pattern, if previously /d,
* becomes /u. That means it can't be an EXACTF node,
* but an EXACTFU */
if (node_type == EXACTF) {
node_type = EXACTFU;
/* If the node already contains something that
* differs between EXACTF and EXACTFU, reparse it
* as EXACTFU */
if (! maybe_exactfu) {
len = 0;
s = s0;
goto reparse;
}
}
break;
case 'r':
ender = '\r';
p++;
break;
case 't':
ender = '\t';
p++;
break;
case 'f':
ender = '\f';
p++;
break;
case 'e':
ender = ESC_NATIVE;
p++;
break;
case 'a':
ender = '\a';
p++;
break;
case 'o':
{
UV result;
const char* error_msg;
bool valid = grok_bslash_o(&p,
RExC_end,
&result,
&error_msg,
TO_OUTPUT_WARNINGS(p),
(bool) RExC_strict,
TRUE, /* Output warnings
for non-
portables */
UTF);
if (! valid) {
RExC_parse = p; /* going to die anyway; point
to exact spot of failure */
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(p - 1);
ender = result;
break;
}
case 'x':
{
UV result = UV_MAX; /* initialize to erroneous
value */
const char* error_msg;
bool valid = grok_bslash_x(&p,
RExC_end,
&result,
&error_msg,
TO_OUTPUT_WARNINGS(p),
(bool) RExC_strict,
TRUE, /* Silence warnings
for non-
portables */
UTF);
if (! valid) {
RExC_parse = p; /* going to die anyway; point
to exact spot of failure */
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(p - 1);
ender = result;
if (ender < 0x100) {
#ifdef EBCDIC
if (RExC_recode_x_to_native) {
ender = LATIN1_TO_NATIVE(ender);
}
#endif
}
break;
}
case 'c':
p++;
ender = grok_bslash_c(*p, TO_OUTPUT_WARNINGS(p));
UPDATE_WARNINGS_LOC(p);
p++;
break;
case '8': case '9': /* must be a backreference */
--p;
/* we have an escape like \8 which cannot be an octal escape
* so we exit the loop, and let the outer loop handle this
* escape which may or may not be a legitimate backref. */
goto loopdone;
case '1': case '2': case '3':case '4':
case '5': case '6': case '7':
/* When we parse backslash escapes there is ambiguity
* between backreferences and octal escapes. Any escape
* from \1 - \9 is a backreference, any multi-digit
* escape which does not start with 0 and which when
* evaluated as decimal could refer to an already
* parsed capture buffer is a back reference. Anything
* else is octal.
*
* Note this implies that \118 could be interpreted as
* 118 OR as "\11" . "8" depending on whether there
* were 118 capture buffers defined already in the
* pattern. */
/* NOTE, RExC_npar is 1 more than the actual number of
* parens we have seen so far, hence the "<" as opposed
* to "<=" */
if ( !isDIGIT(p[1]) || S_backref_value(p, RExC_end) < RExC_npar)
{ /* Not to be treated as an octal constant, go
find backref */
--p;
goto loopdone;
}
/* FALLTHROUGH */
case '0':
{
I32 flags = PERL_SCAN_SILENT_ILLDIGIT;
STRLEN numlen = 3;
ender = grok_oct(p, &numlen, &flags, NULL);
p += numlen;
if ( isDIGIT(*p) /* like \08, \178 */
&& ckWARN(WARN_REGEXP)
&& numlen < 3)
{
reg_warn_non_literal_string(
p + 1,
form_short_octal_warning(p, numlen));
}
}
break;
case '\0':
if (p >= RExC_end)
FAIL("Trailing \\");
/* FALLTHROUGH */
default:
if (isALPHANUMERIC(*p)) {
/* An alpha followed by '{' is going to fail next
* iteration, so don't output this warning in that
* case */
if (! isALPHA(*p) || *(p + 1) != '{') {
ckWARN2reg(p + 1, "Unrecognized escape \\%.1s"
" passed through", p);
}
}
goto normal_default;
} /* End of switch on '\' */
break;
case '{':
/* Trying to gain new uses for '{' without breaking too
* much existing code is hard. The solution currently
* adopted is:
* 1) If there is no ambiguity that a '{' should always
* be taken literally, at the start of a construct, we
* just do so.
* 2) If the literal '{' conflicts with our desired use
* of it as a metacharacter, we die. The deprecation
* cycles for this have come and gone.
* 3) If there is ambiguity, we raise a simple warning.
* This could happen, for example, if the user
* intended it to introduce a quantifier, but slightly
* misspelled the quantifier. Without this warning,
* the quantifier would silently be taken as a literal
* string of characters instead of a meta construct */
if (len || (p > RExC_start && isALPHA_A(*(p - 1)))) {
if ( RExC_strict
|| ( p > parse_start + 1
&& isALPHA_A(*(p - 1))
&& *(p - 2) == '\\')
|| new_regcurly(p, RExC_end))
{
RExC_parse = p + 1;
vFAIL("Unescaped left brace in regex is "
"illegal here");
}
ckWARNreg(p + 1, "Unescaped left brace in regex is"
" passed through");
}
goto normal_default;
case '}':
case ']':
if (p > RExC_parse && RExC_strict) {
ckWARN2reg(p + 1, "Unescaped literal '%c'", *p);
}
/*FALLTHROUGH*/
default: /* A literal character */
normal_default:
if (! UTF8_IS_INVARIANT(*p) && UTF) {
STRLEN numlen;
ender = utf8n_to_uvchr((U8*)p, RExC_end - p,
&numlen, UTF8_ALLOW_DEFAULT);
p += numlen;
}
else
ender = (U8) *p++;
break;
} /* End of switch on the literal */
/* Here, have looked at the literal character, and <ender>
* contains its ordinal; <p> points to the character after it.
* */
if (ender > 255) {
REQUIRE_UTF8(flagp);
}
/* We need to check if the next non-ignored thing is a
* quantifier. Move <p> to after anything that should be
* ignored, which, as a side effect, positions <p> for the next
* loop iteration */
skip_to_be_ignored_text(pRExC_state, &p,
FALSE /* Don't force to /x */ );
/* If the next thing is a quantifier, it applies to this
* character only, which means that this character has to be in
* its own node and can't just be appended to the string in an
* existing node, so if there are already other characters in
* the node, close the node with just them, and set up to do
* this character again next time through, when it will be the
* only thing in its new node */
next_is_quantifier = LIKELY(p < RExC_end)
&& UNLIKELY(ISMULT2(p));
if (next_is_quantifier && LIKELY(len)) {
p = oldp;
goto loopdone;
}
/* Ready to add 'ender' to the node */
if (! FOLD) { /* The simple case, just append the literal */
not_fold_common:
if (UVCHR_IS_INVARIANT(ender) || ! UTF) {
*(s++) = (char) ender;
}
else {
U8 * new_s = uvchr_to_utf8((U8*)s, ender);
added_len = (char *) new_s - s;
s = (char *) new_s;
if (ender > 255) {
requires_utf8_target = TRUE;
}
}
}
else if (LOC && is_PROBLEMATIC_LOCALE_FOLD_cp(ender)) {
/* Here are folding under /l, and the code point is
* problematic. If this is the first character in the
* node, change the node type to folding. Otherwise, if
* this is the first problematic character, close up the
* existing node, so can start a new node with this one */
if (! len) {
node_type = EXACTFL;
RExC_contains_locale = 1;
}
else if (node_type == EXACT) {
p = oldp;
goto loopdone;
}
/* This problematic code point means we can't simplify
* things */
maybe_exactfu = FALSE;
/* Here, we are adding a problematic fold character.
* "Problematic" in this context means that its fold isn't
* known until runtime. (The non-problematic code points
* are the above-Latin1 ones that fold to also all
* above-Latin1. Their folds don't vary no matter what the
* locale is.) But here we have characters whose fold
* depends on the locale. We just add in the unfolded
* character, and wait until runtime to fold it */
goto not_fold_common;
}
else /* regular fold; see if actually is in a fold */
if ( (ender < 256 && ! IS_IN_SOME_FOLD_L1(ender))
|| (ender > 255
&& ! _invlist_contains_cp(PL_in_some_fold, ender)))
{
/* Here, folding, but the character isn't in a fold.
*
* Start a new node if previous characters in the node were
* folded */
if (len && node_type != EXACT) {
p = oldp;
goto loopdone;
}
/* Here, continuing a node with non-folded characters. Add
* this one */
goto not_fold_common;
}
else { /* Here, does participate in some fold */
/* If this is the first character in the node, change its
* type to folding. Otherwise, if this is the first
* folding character in the node, close up the existing
* node, so can start a new node with this one. */
if (! len) {
node_type = compute_EXACTish(pRExC_state);
}
else if (node_type == EXACT) {
p = oldp;
goto loopdone;
}
if (UTF) { /* Use the folded value */
if (UVCHR_IS_INVARIANT(ender)) {
*(s)++ = (U8) toFOLD(ender);
}
else {
ender = _to_uni_fold_flags(
ender,
(U8 *) s,
&added_len,
FOLD_FLAGS_FULL | ((ASCII_FOLD_RESTRICTED)
? FOLD_FLAGS_NOMIX_ASCII
: 0));
s += added_len;
if ( ender > 255
&& LIKELY(ender != GREEK_SMALL_LETTER_MU))
{
/* U+B5 folds to the MU, so its possible for a
* non-UTF-8 target to match it */
requires_utf8_target = TRUE;
}
}
}
else {
/* Here is non-UTF8. First, see if the character's
* fold differs between /d and /u. */
if (PL_fold[ender] != PL_fold_latin1[ender]) {
maybe_exactfu = FALSE;
}
#if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \
|| (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \
|| UNICODE_DOT_DOT_VERSION > 0)
/* On non-ancient Unicode versions, this includes the
* multi-char fold SHARP S to 'ss' */
if ( UNLIKELY(ender == LATIN_SMALL_LETTER_SHARP_S)
|| ( isALPHA_FOLD_EQ(ender, 's')
&& len > 0
&& isALPHA_FOLD_EQ(*(s-1), 's')))
{
/* Here, we have one of the following:
* a) a SHARP S. This folds to 'ss' only under
* /u rules. If we are in that situation,
* fold the SHARP S to 'ss'. See the comments
* for join_exact() as to why we fold this
* non-UTF at compile time, and no others.
* b) 'ss'. When under /u, there's nothing
* special needed to be done here. The
* previous iteration handled the first 's',
* and this iteration will handle the second.
* If, on the otherhand it's not /u, we have
* to exclude the possibility of moving to /u,
* so that we won't generate an unwanted
* match, unless, at runtime, the target
* string is in UTF-8.
* */
has_ss = TRUE;
maybe_exactfu = FALSE; /* Can't generate an
EXACTFU node (unless we
already are in one) */
if (UNLIKELY(ender == LATIN_SMALL_LETTER_SHARP_S)) {
maybe_SIMPLE = 0;
if (node_type == EXACTFU) {
*(s++) = 's';
/* Let the code below add in the extra 's' */
ender = 's';
added_len = 2;
}
}
}
#endif
else if (UNLIKELY(ender == MICRO_SIGN)) {
has_micro_sign = TRUE;
}
*(s++) = (DEPENDS_SEMANTICS)
? (char) toFOLD(ender)
/* Under /u, the fold of any character in
* the 0-255 range happens to be its
* lowercase equivalent, except for LATIN
* SMALL LETTER SHARP S, which was handled
* above, and the MICRO SIGN, whose fold
* requires UTF-8 to represent. */
: (char) toLOWER_L1(ender);
}
} /* End of adding current character to the node */
len += added_len;
if (next_is_quantifier) {
/* Here, the next input is a quantifier, and to get here,
* the current character is the only one in the node. */
goto loopdone;
}
} /* End of loop through literal characters */
/* Here we have either exhausted the input or ran out of room in
* the node. (If we encountered a character that can't be in the
* node, transfer is made directly to <loopdone>, and so we
* wouldn't have fallen off the end of the loop.) In the latter
* case, we artificially have to split the node into two, because
* we just don't have enough space to hold everything. This
* creates a problem if the final character participates in a
* multi-character fold in the non-final position, as a match that
* should have occurred won't, due to the way nodes are matched,
* and our artificial boundary. So back off until we find a non-
* problematic character -- one that isn't at the beginning or
* middle of such a fold. (Either it doesn't participate in any
* folds, or appears only in the final position of all the folds it
* does participate in.) A better solution with far fewer false
* positives, and that would fill the nodes more completely, would
* be to actually have available all the multi-character folds to
* test against, and to back-off only far enough to be sure that
* this node isn't ending with a partial one. <upper_parse> is set
* further below (if we need to reparse the node) to include just
* up through that final non-problematic character that this code
* identifies, so when it is set to less than the full node, we can
* skip the rest of this */
if (FOLD && p < RExC_end && upper_parse == MAX_NODE_STRING_SIZE) {
PERL_UINT_FAST8_T backup_count = 0;
const STRLEN full_len = len;
assert(len >= MAX_NODE_STRING_SIZE);
/* Here, <s> points to just beyond where we have output the
* final character of the node. Look backwards through the
* string until find a non- problematic character */
if (! UTF) {
/* This has no multi-char folds to non-UTF characters */
if (ASCII_FOLD_RESTRICTED) {
goto loopdone;
}
while (--s >= s0 && IS_NON_FINAL_FOLD(*s)) {
backup_count++;
}
len = s - s0 + 1;
}
else {
/* Point to the first byte of the final character */
s = (char *) utf8_hop_back((U8 *) s, -1, (U8 *) s0);
while (s >= s0) { /* Search backwards until find
a non-problematic char */
if (UTF8_IS_INVARIANT(*s)) {
/* There are no ascii characters that participate
* in multi-char folds under /aa. In EBCDIC, the
* non-ascii invariants are all control characters,
* so don't ever participate in any folds. */
if (ASCII_FOLD_RESTRICTED
|| ! IS_NON_FINAL_FOLD(*s))
{
break;
}
}
else if (UTF8_IS_DOWNGRADEABLE_START(*s)) {
if (! IS_NON_FINAL_FOLD(EIGHT_BIT_UTF8_TO_NATIVE(
*s, *(s+1))))
{
break;
}
}
else if (! _invlist_contains_cp(
PL_NonFinalFold,
valid_utf8_to_uvchr((U8 *) s, NULL)))
{
break;
}
/* Here, the current character is problematic in that
* it does occur in the non-final position of some
* fold, so try the character before it, but have to
* special case the very first byte in the string, so
* we don't read outside the string */
s = (s == s0) ? s -1 : (char *) utf8_hop((U8 *) s, -1);
backup_count++;
} /* End of loop backwards through the string */
/* If there were only problematic characters in the string,
* <s> will point to before s0, in which case the length
* should be 0, otherwise include the length of the
* non-problematic character just found */
len = (s < s0) ? 0 : s - s0 + UTF8SKIP(s);
}
/* Here, have found the final character, if any, that is
* non-problematic as far as ending the node without splitting
* it across a potential multi-char fold. <len> contains the
* number of bytes in the node up-to and including that
* character, or is 0 if there is no such character, meaning
* the whole node contains only problematic characters. In
* this case, give up and just take the node as-is. We can't
* do any better */
if (len == 0) {
len = full_len;
} else {
/* Here, the node does contain some characters that aren't
* problematic. If we didn't have to backup any, then the
* final character in the node is non-problematic, and we
* can take the node as-is */
if (backup_count == 0) {
goto loopdone;
}
else if (backup_count == 1) {
/* If the final character is problematic, but the
* penultimate is not, back-off that last character to
* later start a new node with it */
p = oldp;
goto loopdone;
}
/* Here, the final non-problematic character is earlier
* in the input than the penultimate character. What we do
* is reparse from the beginning, going up only as far as
* this final ok one, thus guaranteeing that the node ends
* in an acceptable character. The reason we reparse is
* that we know how far in the character is, but we don't
* know how to correlate its position with the input parse.
* An alternate implementation would be to build that
* correlation as we go along during the original parse,
* but that would entail extra work for every node, whereas
* this code gets executed only when the string is too
* large for the node, and the final two characters are
* problematic, an infrequent occurrence. Yet another
* possible strategy would be to save the tail of the
* string, and the next time regatom is called, initialize
* with that. The problem with this is that unless you
* back off one more character, you won't be guaranteed
* regatom will get called again, unless regbranch,
* regpiece ... are also changed. If you do back off that
* extra character, so that there is input guaranteed to
* force calling regatom, you can't handle the case where
* just the first character in the node is acceptable. I
* (khw) decided to try this method which doesn't have that
* pitfall; if performance issues are found, we can do a
* combination of the current approach plus that one */
upper_parse = len;
len = 0;
s = s0;
goto reparse;
}
} /* End of verifying node ends with an appropriate char */
loopdone: /* Jumped to when encounters something that shouldn't be
in the node */
/* Free up any over-allocated space; cast is to silence bogus
* warning in MS VC */
change_engine_size(pRExC_state,
- (Ptrdiff_t) (initial_size - STR_SZ(len)));
/* I (khw) don't know if you can get here with zero length, but the
* old code handled this situation by creating a zero-length EXACT
* node. Might as well be NOTHING instead */
if (len == 0) {
OP(REGNODE_p(ret)) = NOTHING;
}
else {
/* If the node type is EXACT here, check to see if it
* should be EXACTL, or EXACT_ONLY8. */
if (node_type == EXACT) {
if (LOC) {
node_type = EXACTL;
}
else if (requires_utf8_target) {
node_type = EXACT_ONLY8;
}
} else if (FOLD) {
if ( UNLIKELY(has_micro_sign || has_ss)
&& (node_type == EXACTFU || ( node_type == EXACTF
&& maybe_exactfu)))
{ /* These two conditions are problematic in non-UTF-8
EXACTFU nodes. */
assert(! UTF);
node_type = EXACTFUP;
}
else if (node_type == EXACTFL) {
/* 'maybe_exactfu' is deliberately set above to
* indicate this node type, where all code points in it
* are above 255 */
if (maybe_exactfu) {
node_type = EXACTFLU8;
}
else if (UNLIKELY(
_invlist_contains_cp(PL_HasMultiCharFold, ender)))
{
/* A character that folds to more than one will
* match multiple characters, so can't be SIMPLE.
* We don't have to worry about this with EXACTFLU8
* nodes just above, as they have already been
* folded (since the fold doesn't vary at run
* time). Here, if the final character in the node
* folds to multiple, it can't be simple. (This
* only has an effect if the node has only a single
* character, hence the final one, as elsewhere we
* turn off simple for nodes whose length > 1 */
maybe_SIMPLE = 0;
}
}
else if (node_type == EXACTF) { /* Means is /di */
/* If 'maybe_exactfu' is clear, then we need to stay
* /di. If it is set, it means there are no code
* points that match differently depending on UTF8ness
* of the target string, so it can become an EXACTFU
* node */
if (! maybe_exactfu) {
RExC_seen_d_op = TRUE;
}
else if ( isALPHA_FOLD_EQ(* STRING(REGNODE_p(ret)), 's')
|| isALPHA_FOLD_EQ(ender, 's'))
{
/* But, if the node begins or ends in an 's' we
* have to defer changing it into an EXACTFU, as
* the node could later get joined with another one
* that ends or begins with 's' creating an 'ss'
* sequence which would then wrongly match the
* sharp s without the target being UTF-8. We
* create a special node that we resolve later when
* we join nodes together */
node_type = EXACTFU_S_EDGE;
}
else {
node_type = EXACTFU;
}
}
if (requires_utf8_target && node_type == EXACTFU) {
node_type = EXACTFU_ONLY8;
}
}
OP(REGNODE_p(ret)) = node_type;
STR_LEN(REGNODE_p(ret)) = len;
RExC_emit += STR_SZ(len);
/* If the node isn't a single character, it can't be SIMPLE */
if (len > (Size_t) ((UTF) ? UVCHR_SKIP(ender) : 1)) {
maybe_SIMPLE = 0;
}
*flagp |= HASWIDTH | maybe_SIMPLE;
}
Set_Node_Length(REGNODE_p(ret), p - parse_start - 1);
RExC_parse = p;
{
/* len is STRLEN which is unsigned, need to copy to signed */
IV iv = len;
if (iv < 0)
vFAIL("Internal disaster");
}
} /* End of label 'defchar:' */
break;
} /* End of giant switch on input character */
/* Position parse to next real character */
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
if ( *RExC_parse == '{'
&& OP(REGNODE_p(ret)) != SBOL && ! regcurly(RExC_parse))
{
if (RExC_strict || new_regcurly(RExC_parse, RExC_end)) {
RExC_parse++;
vFAIL("Unescaped left brace in regex is illegal here");
}
ckWARNreg(RExC_parse + 1, "Unescaped left brace in regex is"
" passed through");
}
return(ret);
}
STATIC void
S_populate_ANYOF_from_invlist(pTHX_ regnode *node, SV** invlist_ptr)
{
/* Uses the inversion list '*invlist_ptr' to populate the ANYOF 'node'. It
* sets up the bitmap and any flags, removing those code points from the
* inversion list, setting it to NULL should it become completely empty */
dVAR;
PERL_ARGS_ASSERT_POPULATE_ANYOF_FROM_INVLIST;
assert(PL_regkind[OP(node)] == ANYOF);
/* There is no bitmap for this node type */
if (OP(node) == ANYOFH) {
return;
}
ANYOF_BITMAP_ZERO(node);
if (*invlist_ptr) {
/* This gets set if we actually need to modify things */
bool change_invlist = FALSE;
UV start, end;
/* Start looking through *invlist_ptr */
invlist_iterinit(*invlist_ptr);
while (invlist_iternext(*invlist_ptr, &start, &end)) {
UV high;
int i;
if (end == UV_MAX && start <= NUM_ANYOF_CODE_POINTS) {
ANYOF_FLAGS(node) |= ANYOF_MATCHES_ALL_ABOVE_BITMAP;
}
/* Quit if are above what we should change */
if (start >= NUM_ANYOF_CODE_POINTS) {
break;
}
change_invlist = TRUE;
/* Set all the bits in the range, up to the max that we are doing */
high = (end < NUM_ANYOF_CODE_POINTS - 1)
? end
: NUM_ANYOF_CODE_POINTS - 1;
for (i = start; i <= (int) high; i++) {
if (! ANYOF_BITMAP_TEST(node, i)) {
ANYOF_BITMAP_SET(node, i);
}
}
}
invlist_iterfinish(*invlist_ptr);
/* Done with loop; remove any code points that are in the bitmap from
* *invlist_ptr; similarly for code points above the bitmap if we have
* a flag to match all of them anyways */
if (change_invlist) {
_invlist_subtract(*invlist_ptr, PL_InBitmap, invlist_ptr);
}
if (ANYOF_FLAGS(node) & ANYOF_MATCHES_ALL_ABOVE_BITMAP) {
_invlist_intersection(*invlist_ptr, PL_InBitmap, invlist_ptr);
}
/* If have completely emptied it, remove it completely */
if (_invlist_len(*invlist_ptr) == 0) {
SvREFCNT_dec_NN(*invlist_ptr);
*invlist_ptr = NULL;
}
}
}
/* Parse POSIX character classes: [[:foo:]], [[=foo=]], [[.foo.]].
Character classes ([:foo:]) can also be negated ([:^foo:]).
Returns a named class id (ANYOF_XXX) if successful, -1 otherwise.
Equivalence classes ([=foo=]) and composites ([.foo.]) are parsed,
but trigger failures because they are currently unimplemented. */
#define POSIXCC_DONE(c) ((c) == ':')
#define POSIXCC_NOTYET(c) ((c) == '=' || (c) == '.')
#define POSIXCC(c) (POSIXCC_DONE(c) || POSIXCC_NOTYET(c))
#define MAYBE_POSIXCC(c) (POSIXCC(c) || (c) == '^' || (c) == ';')
#define WARNING_PREFIX "Assuming NOT a POSIX class since "
#define NO_BLANKS_POSIX_WARNING "no blanks are allowed in one"
#define SEMI_COLON_POSIX_WARNING "a semi-colon was found instead of a colon"
#define NOT_MEANT_TO_BE_A_POSIX_CLASS (OOB_NAMEDCLASS - 1)
/* 'posix_warnings' and 'warn_text' are names of variables in the following
* routine. q.v. */
#define ADD_POSIX_WARNING(p, text) STMT_START { \
if (posix_warnings) { \
if (! RExC_warn_text ) RExC_warn_text = \
(AV *) sv_2mortal((SV *) newAV()); \
av_push(RExC_warn_text, Perl_newSVpvf(aTHX_ \
WARNING_PREFIX \
text \
REPORT_LOCATION, \
REPORT_LOCATION_ARGS(p))); \
} \
} STMT_END
#define CLEAR_POSIX_WARNINGS() \
STMT_START { \
if (posix_warnings && RExC_warn_text) \
av_clear(RExC_warn_text); \
} STMT_END
#define CLEAR_POSIX_WARNINGS_AND_RETURN(ret) \
STMT_START { \
CLEAR_POSIX_WARNINGS(); \
return ret; \
} STMT_END
STATIC int
S_handle_possible_posix(pTHX_ RExC_state_t *pRExC_state,
const char * const s, /* Where the putative posix class begins.
Normally, this is one past the '['. This
parameter exists so it can be somewhere
besides RExC_parse. */
char ** updated_parse_ptr, /* Where to set the updated parse pointer, or
NULL */
AV ** posix_warnings, /* Where to place any generated warnings, or
NULL */
const bool check_only /* Don't die if error */
)
{
/* This parses what the caller thinks may be one of the three POSIX
* constructs:
* 1) a character class, like [:blank:]
* 2) a collating symbol, like [. .]
* 3) an equivalence class, like [= =]
* In the latter two cases, it croaks if it finds a syntactically legal
* one, as these are not handled by Perl.
*
* The main purpose is to look for a POSIX character class. It returns:
* a) the class number
* if it is a completely syntactically and semantically legal class.
* 'updated_parse_ptr', if not NULL, is set to point to just after the
* closing ']' of the class
* b) OOB_NAMEDCLASS
* if it appears that one of the three POSIX constructs was meant, but
* its specification was somehow defective. 'updated_parse_ptr', if
* not NULL, is set to point to the character just after the end
* character of the class. See below for handling of warnings.
* c) NOT_MEANT_TO_BE_A_POSIX_CLASS
* if it doesn't appear that a POSIX construct was intended.
* 'updated_parse_ptr' is not changed. No warnings nor errors are
* raised.
*
* In b) there may be errors or warnings generated. If 'check_only' is
* TRUE, then any errors are discarded. Warnings are returned to the
* caller via an AV* created into '*posix_warnings' if it is not NULL. If
* instead it is NULL, warnings are suppressed.
*
* The reason for this function, and its complexity is that a bracketed
* character class can contain just about anything. But it's easy to
* mistype the very specific posix class syntax but yielding a valid
* regular bracketed class, so it silently gets compiled into something
* quite unintended.
*
* The solution adopted here maintains backward compatibility except that
* it adds a warning if it looks like a posix class was intended but
* improperly specified. The warning is not raised unless what is input
* very closely resembles one of the 14 legal posix classes. To do this,
* it uses fuzzy parsing. It calculates how many single-character edits it
* would take to transform what was input into a legal posix class. Only
* if that number is quite small does it think that the intention was a
* posix class. Obviously these are heuristics, and there will be cases
* where it errs on one side or another, and they can be tweaked as
* experience informs.
*
* The syntax for a legal posix class is:
*
* qr/(?xa: \[ : \^? [[:lower:]]{4,6} : \] )/
*
* What this routine considers syntactically to be an intended posix class
* is this (the comments indicate some restrictions that the pattern
* doesn't show):
*
* qr/(?x: \[? # The left bracket, possibly
* # omitted
* \h* # possibly followed by blanks
* (?: \^ \h* )? # possibly a misplaced caret
* [:;]? # The opening class character,
* # possibly omitted. A typo
* # semi-colon can also be used.
* \h*
* \^? # possibly a correctly placed
* # caret, but not if there was also
* # a misplaced one
* \h*
* .{3,15} # The class name. If there are
* # deviations from the legal syntax,
* # its edit distance must be close
* # to a real class name in order
* # for it to be considered to be
* # an intended posix class.
* \h*
* [[:punct:]]? # The closing class character,
* # possibly omitted. If not a colon
* # nor semi colon, the class name
* # must be even closer to a valid
* # one
* \h*
* \]? # The right bracket, possibly
* # omitted.
* )/
*
* In the above, \h must be ASCII-only.
*
* These are heuristics, and can be tweaked as field experience dictates.
* There will be cases when someone didn't intend to specify a posix class
* that this warns as being so. The goal is to minimize these, while
* maximizing the catching of things intended to be a posix class that
* aren't parsed as such.
*/
const char* p = s;
const char * const e = RExC_end;
unsigned complement = 0; /* If to complement the class */
bool found_problem = FALSE; /* Assume OK until proven otherwise */
bool has_opening_bracket = FALSE;
bool has_opening_colon = FALSE;
int class_number = OOB_NAMEDCLASS; /* Out-of-bounds until find
valid class */
const char * possible_end = NULL; /* used for a 2nd parse pass */
const char* name_start; /* ptr to class name first char */
/* If the number of single-character typos the input name is away from a
* legal name is no more than this number, it is considered to have meant
* the legal name */
int max_distance = 2;
/* to store the name. The size determines the maximum length before we
* decide that no posix class was intended. Should be at least
* sizeof("alphanumeric") */
UV input_text[15];
STATIC_ASSERT_DECL(C_ARRAY_LENGTH(input_text) >= sizeof "alphanumeric");
PERL_ARGS_ASSERT_HANDLE_POSSIBLE_POSIX;
CLEAR_POSIX_WARNINGS();
if (p >= e) {
return NOT_MEANT_TO_BE_A_POSIX_CLASS;
}
if (*(p - 1) != '[') {
ADD_POSIX_WARNING(p, "it doesn't start with a '['");
found_problem = TRUE;
}
else {
has_opening_bracket = TRUE;
}
/* They could be confused and think you can put spaces between the
* components */
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
/* For [. .] and [= =]. These are quite different internally from [: :],
* so they are handled separately. */
if (POSIXCC_NOTYET(*p) && p < e - 3) /* 1 for the close, and 1 for the ']'
and 1 for at least one char in it
*/
{
const char open_char = *p;
const char * temp_ptr = p + 1;
/* These two constructs are not handled by perl, and if we find a
* syntactically valid one, we croak. khw, who wrote this code, finds
* this explanation of them very unclear:
* http://pubs.opengroup.org/onlinepubs/009696899/basedefs/xbd_chap09.html
* And searching the rest of the internet wasn't very helpful either.
* It looks like just about any byte can be in these constructs,
* depending on the locale. But unless the pattern is being compiled
* under /l, which is very rare, Perl runs under the C or POSIX locale.
* In that case, it looks like [= =] isn't allowed at all, and that
* [. .] could be any single code point, but for longer strings the
* constituent characters would have to be the ASCII alphabetics plus
* the minus-hyphen. Any sensible locale definition would limit itself
* to these. And any portable one definitely should. Trying to parse
* the general case is a nightmare (see [perl #127604]). So, this code
* looks only for interiors of these constructs that match:
* qr/.|[-\w]{2,}/
* Using \w relaxes the apparent rules a little, without adding much
* danger of mistaking something else for one of these constructs.
*
* [. .] in some implementations described on the internet is usable to
* escape a character that otherwise is special in bracketed character
* classes. For example [.].] means a literal right bracket instead of
* the ending of the class
*
* [= =] can legitimately contain a [. .] construct, but we don't
* handle this case, as that [. .] construct will later get parsed
* itself and croak then. And [= =] is checked for even when not under
* /l, as Perl has long done so.
*
* The code below relies on there being a trailing NUL, so it doesn't
* have to keep checking if the parse ptr < e.
*/
if (temp_ptr[1] == open_char) {
temp_ptr++;
}
else while ( temp_ptr < e
&& (isWORDCHAR(*temp_ptr) || *temp_ptr == '-'))
{
temp_ptr++;
}
if (*temp_ptr == open_char) {
temp_ptr++;
if (*temp_ptr == ']') {
temp_ptr++;
if (! found_problem && ! check_only) {
RExC_parse = (char *) temp_ptr;
vFAIL3("POSIX syntax [%c %c] is reserved for future "
"extensions", open_char, open_char);
}
/* Here, the syntax wasn't completely valid, or else the call
* is to check-only */
if (updated_parse_ptr) {
*updated_parse_ptr = (char *) temp_ptr;
}
CLEAR_POSIX_WARNINGS_AND_RETURN(OOB_NAMEDCLASS);
}
}
/* If we find something that started out to look like one of these
* constructs, but isn't, we continue below so that it can be checked
* for being a class name with a typo of '.' or '=' instead of a colon.
* */
}
/* Here, we think there is a possibility that a [: :] class was meant, and
* we have the first real character. It could be they think the '^' comes
* first */
if (*p == '^') {
found_problem = TRUE;
ADD_POSIX_WARNING(p + 1, "the '^' must come after the colon");
complement = 1;
p++;
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
}
/* But the first character should be a colon, which they could have easily
* mistyped on a qwerty keyboard as a semi-colon (and which may be hard to
* distinguish from a colon, so treat that as a colon). */
if (*p == ':') {
p++;
has_opening_colon = TRUE;
}
else if (*p == ';') {
found_problem = TRUE;
p++;
ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING);
has_opening_colon = TRUE;
}
else {
found_problem = TRUE;
ADD_POSIX_WARNING(p, "there must be a starting ':'");
/* Consider an initial punctuation (not one of the recognized ones) to
* be a left terminator */
if (*p != '^' && *p != ']' && isPUNCT(*p)) {
p++;
}
}
/* They may think that you can put spaces between the components */
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
if (*p == '^') {
/* We consider something like [^:^alnum:]] to not have been intended to
* be a posix class, but XXX maybe we should */
if (complement) {
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
complement = 1;
p++;
}
/* Again, they may think that you can put spaces between the components */
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
if (*p == ']') {
/* XXX This ']' may be a typo, and something else was meant. But
* treating it as such creates enough complications, that that
* possibility isn't currently considered here. So we assume that the
* ']' is what is intended, and if we've already found an initial '[',
* this leaves this construct looking like [:] or [:^], which almost
* certainly weren't intended to be posix classes */
if (has_opening_bracket) {
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* But this function can be called when we parse the colon for
* something like qr/[alpha:]]/, so we back up to look for the
* beginning */
p--;
if (*p == ';') {
found_problem = TRUE;
ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING);
}
else if (*p != ':') {
/* XXX We are currently very restrictive here, so this code doesn't
* consider the possibility that, say, /[alpha.]]/ was intended to
* be a posix class. */
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* Here we have something like 'foo:]'. There was no initial colon,
* and we back up over 'foo. XXX Unlike the going forward case, we
* don't handle typos of non-word chars in the middle */
has_opening_colon = FALSE;
p--;
while (p > RExC_start && isWORDCHAR(*p)) {
p--;
}
p++;
/* Here, we have positioned ourselves to where we think the first
* character in the potential class is */
}
/* Now the interior really starts. There are certain key characters that
* can end the interior, or these could just be typos. To catch both
* cases, we may have to do two passes. In the first pass, we keep on
* going unless we come to a sequence that matches
* qr/ [[:punct:]] [[:blank:]]* \] /xa
* This means it takes a sequence to end the pass, so two typos in a row if
* that wasn't what was intended. If the class is perfectly formed, just
* this one pass is needed. We also stop if there are too many characters
* being accumulated, but this number is deliberately set higher than any
* real class. It is set high enough so that someone who thinks that
* 'alphanumeric' is a correct name would get warned that it wasn't.
* While doing the pass, we keep track of where the key characters were in
* it. If we don't find an end to the class, and one of the key characters
* was found, we redo the pass, but stop when we get to that character.
* Thus the key character was considered a typo in the first pass, but a
* terminator in the second. If two key characters are found, we stop at
* the second one in the first pass. Again this can miss two typos, but
* catches a single one
*
* In the first pass, 'possible_end' starts as NULL, and then gets set to
* point to the first key character. For the second pass, it starts as -1.
* */
name_start = p;
parse_name:
{
bool has_blank = FALSE;
bool has_upper = FALSE;
bool has_terminating_colon = FALSE;
bool has_terminating_bracket = FALSE;
bool has_semi_colon = FALSE;
unsigned int name_len = 0;
int punct_count = 0;
while (p < e) {
/* Squeeze out blanks when looking up the class name below */
if (isBLANK(*p) ) {
has_blank = TRUE;
found_problem = TRUE;
p++;
continue;
}
/* The name will end with a punctuation */
if (isPUNCT(*p)) {
const char * peek = p + 1;
/* Treat any non-']' punctuation followed by a ']' (possibly
* with intervening blanks) as trying to terminate the class.
* ']]' is very likely to mean a class was intended (but
* missing the colon), but the warning message that gets
* generated shows the error position better if we exit the
* loop at the bottom (eventually), so skip it here. */
if (*p != ']') {
if (peek < e && isBLANK(*peek)) {
has_blank = TRUE;
found_problem = TRUE;
do {
peek++;
} while (peek < e && isBLANK(*peek));
}
if (peek < e && *peek == ']') {
has_terminating_bracket = TRUE;
if (*p == ':') {
has_terminating_colon = TRUE;
}
else if (*p == ';') {
has_semi_colon = TRUE;
has_terminating_colon = TRUE;
}
else {
found_problem = TRUE;
}
p = peek + 1;
goto try_posix;
}
}
/* Here we have punctuation we thought didn't end the class.
* Keep track of the position of the key characters that are
* more likely to have been class-enders */
if (*p == ']' || *p == '[' || *p == ':' || *p == ';') {
/* Allow just one such possible class-ender not actually
* ending the class. */
if (possible_end) {
break;
}
possible_end = p;
}
/* If we have too many punctuation characters, no use in
* keeping going */
if (++punct_count > max_distance) {
break;
}
/* Treat the punctuation as a typo. */
input_text[name_len++] = *p;
p++;
}
else if (isUPPER(*p)) { /* Use lowercase for lookup */
input_text[name_len++] = toLOWER(*p);
has_upper = TRUE;
found_problem = TRUE;
p++;
} else if (! UTF || UTF8_IS_INVARIANT(*p)) {
input_text[name_len++] = *p;
p++;
}
else {
input_text[name_len++] = utf8_to_uvchr_buf((U8 *) p, e, NULL);
p+= UTF8SKIP(p);
}
/* The declaration of 'input_text' is how long we allow a potential
* class name to be, before saying they didn't mean a class name at
* all */
if (name_len >= C_ARRAY_LENGTH(input_text)) {
break;
}
}
/* We get to here when the possible class name hasn't been properly
* terminated before:
* 1) we ran off the end of the pattern; or
* 2) found two characters, each of which might have been intended to
* be the name's terminator
* 3) found so many punctuation characters in the purported name,
* that the edit distance to a valid one is exceeded
* 4) we decided it was more characters than anyone could have
* intended to be one. */
found_problem = TRUE;
/* In the final two cases, we know that looking up what we've
* accumulated won't lead to a match, even a fuzzy one. */
if ( name_len >= C_ARRAY_LENGTH(input_text)
|| punct_count > max_distance)
{
/* If there was an intermediate key character that could have been
* an intended end, redo the parse, but stop there */
if (possible_end && possible_end != (char *) -1) {
possible_end = (char *) -1; /* Special signal value to say
we've done a first pass */
p = name_start;
goto parse_name;
}
/* Otherwise, it can't have meant to have been a class */
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* If we ran off the end, and the final character was a punctuation
* one, back up one, to look at that final one just below. Later, we
* will restore the parse pointer if appropriate */
if (name_len && p == e && isPUNCT(*(p-1))) {
p--;
name_len--;
}
if (p < e && isPUNCT(*p)) {
if (*p == ']') {
has_terminating_bracket = TRUE;
/* If this is a 2nd ']', and the first one is just below this
* one, consider that to be the real terminator. This gives a
* uniform and better positioning for the warning message */
if ( possible_end
&& possible_end != (char *) -1
&& *possible_end == ']'
&& name_len && input_text[name_len - 1] == ']')
{
name_len--;
p = possible_end;
/* And this is actually equivalent to having done the 2nd
* pass now, so set it to not try again */
possible_end = (char *) -1;
}
}
else {
if (*p == ':') {
has_terminating_colon = TRUE;
}
else if (*p == ';') {
has_semi_colon = TRUE;
has_terminating_colon = TRUE;
}
p++;
}
}
try_posix:
/* Here, we have a class name to look up. We can short circuit the
* stuff below for short names that can't possibly be meant to be a
* class name. (We can do this on the first pass, as any second pass
* will yield an even shorter name) */
if (name_len < 3) {
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* Find which class it is. Initially switch on the length of the name.
* */
switch (name_len) {
case 4:
if (memEQs(name_start, 4, "word")) {
/* this is not POSIX, this is the Perl \w */
class_number = ANYOF_WORDCHAR;
}
break;
case 5:
/* Names all of length 5: alnum alpha ascii blank cntrl digit
* graph lower print punct space upper
* Offset 4 gives the best switch position. */
switch (name_start[4]) {
case 'a':
if (memBEGINs(name_start, 5, "alph")) /* alpha */
class_number = ANYOF_ALPHA;
break;
case 'e':
if (memBEGINs(name_start, 5, "spac")) /* space */
class_number = ANYOF_SPACE;
break;
case 'h':
if (memBEGINs(name_start, 5, "grap")) /* graph */
class_number = ANYOF_GRAPH;
break;
case 'i':
if (memBEGINs(name_start, 5, "asci")) /* ascii */
class_number = ANYOF_ASCII;
break;
case 'k':
if (memBEGINs(name_start, 5, "blan")) /* blank */
class_number = ANYOF_BLANK;
break;
case 'l':
if (memBEGINs(name_start, 5, "cntr")) /* cntrl */
class_number = ANYOF_CNTRL;
break;
case 'm':
if (memBEGINs(name_start, 5, "alnu")) /* alnum */
class_number = ANYOF_ALPHANUMERIC;
break;
case 'r':
if (memBEGINs(name_start, 5, "lowe")) /* lower */
class_number = (FOLD) ? ANYOF_CASED : ANYOF_LOWER;
else if (memBEGINs(name_start, 5, "uppe")) /* upper */
class_number = (FOLD) ? ANYOF_CASED : ANYOF_UPPER;
break;
case 't':
if (memBEGINs(name_start, 5, "digi")) /* digit */
class_number = ANYOF_DIGIT;
else if (memBEGINs(name_start, 5, "prin")) /* print */
class_number = ANYOF_PRINT;
else if (memBEGINs(name_start, 5, "punc")) /* punct */
class_number = ANYOF_PUNCT;
break;
}
break;
case 6:
if (memEQs(name_start, 6, "xdigit"))
class_number = ANYOF_XDIGIT;
break;
}
/* If the name exactly matches a posix class name the class number will
* here be set to it, and the input almost certainly was meant to be a
* posix class, so we can skip further checking. If instead the syntax
* is exactly correct, but the name isn't one of the legal ones, we
* will return that as an error below. But if neither of these apply,
* it could be that no posix class was intended at all, or that one
* was, but there was a typo. We tease these apart by doing fuzzy
* matching on the name */
if (class_number == OOB_NAMEDCLASS && found_problem) {
const UV posix_names[][6] = {
{ 'a', 'l', 'n', 'u', 'm' },
{ 'a', 'l', 'p', 'h', 'a' },
{ 'a', 's', 'c', 'i', 'i' },
{ 'b', 'l', 'a', 'n', 'k' },
{ 'c', 'n', 't', 'r', 'l' },
{ 'd', 'i', 'g', 'i', 't' },
{ 'g', 'r', 'a', 'p', 'h' },
{ 'l', 'o', 'w', 'e', 'r' },
{ 'p', 'r', 'i', 'n', 't' },
{ 'p', 'u', 'n', 'c', 't' },
{ 's', 'p', 'a', 'c', 'e' },
{ 'u', 'p', 'p', 'e', 'r' },
{ 'w', 'o', 'r', 'd' },
{ 'x', 'd', 'i', 'g', 'i', 't' }
};
/* The names of the above all have added NULs to make them the same
* size, so we need to also have the real lengths */
const UV posix_name_lengths[] = {
sizeof("alnum") - 1,
sizeof("alpha") - 1,
sizeof("ascii") - 1,
sizeof("blank") - 1,
sizeof("cntrl") - 1,
sizeof("digit") - 1,
sizeof("graph") - 1,
sizeof("lower") - 1,
sizeof("print") - 1,
sizeof("punct") - 1,
sizeof("space") - 1,
sizeof("upper") - 1,
sizeof("word") - 1,
sizeof("xdigit")- 1
};
unsigned int i;
int temp_max = max_distance; /* Use a temporary, so if we
reparse, we haven't changed the
outer one */
/* Use a smaller max edit distance if we are missing one of the
* delimiters */
if ( has_opening_bracket + has_opening_colon < 2
|| has_terminating_bracket + has_terminating_colon < 2)
{
temp_max--;
}
/* See if the input name is close to a legal one */
for (i = 0; i < C_ARRAY_LENGTH(posix_names); i++) {
/* Short circuit call if the lengths are too far apart to be
* able to match */
if (abs( (int) (name_len - posix_name_lengths[i]))
> temp_max)
{
continue;
}
if (edit_distance(input_text,
posix_names[i],
name_len,
posix_name_lengths[i],
temp_max
)
> -1)
{ /* If it is close, it probably was intended to be a class */
goto probably_meant_to_be;
}
}
/* Here the input name is not close enough to a valid class name
* for us to consider it to be intended to be a posix class. If
* we haven't already done so, and the parse found a character that
* could have been terminators for the name, but which we absorbed
* as typos during the first pass, repeat the parse, signalling it
* to stop at that character */
if (possible_end && possible_end != (char *) -1) {
possible_end = (char *) -1;
p = name_start;
goto parse_name;
}
/* Here neither pass found a close-enough class name */
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
probably_meant_to_be:
/* Here we think that a posix specification was intended. Update any
* parse pointer */
if (updated_parse_ptr) {
*updated_parse_ptr = (char *) p;
}
/* If a posix class name was intended but incorrectly specified, we
* output or return the warnings */
if (found_problem) {
/* We set flags for these issues in the parse loop above instead of
* adding them to the list of warnings, because we can parse it
* twice, and we only want one warning instance */
if (has_upper) {
ADD_POSIX_WARNING(p, "the name must be all lowercase letters");
}
if (has_blank) {
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
if (has_semi_colon) {
ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING);
}
else if (! has_terminating_colon) {
ADD_POSIX_WARNING(p, "there is no terminating ':'");
}
if (! has_terminating_bracket) {
ADD_POSIX_WARNING(p, "there is no terminating ']'");
}
if ( posix_warnings
&& RExC_warn_text
&& av_top_index(RExC_warn_text) > -1)
{
*posix_warnings = RExC_warn_text;
}
}
else if (class_number != OOB_NAMEDCLASS) {
/* If it is a known class, return the class. The class number
* #defines are structured so each complement is +1 to the normal
* one */
CLEAR_POSIX_WARNINGS_AND_RETURN(class_number + complement);
}
else if (! check_only) {
/* Here, it is an unrecognized class. This is an error (unless the
* call is to check only, which we've already handled above) */
const char * const complement_string = (complement)
? "^"
: "";
RExC_parse = (char *) p;
vFAIL3utf8f("POSIX class [:%s%" UTF8f ":] unknown",
complement_string,
UTF8fARG(UTF, RExC_parse - name_start - 2, name_start));
}
}
return OOB_NAMEDCLASS;
}
#undef ADD_POSIX_WARNING
STATIC unsigned int
S_regex_set_precedence(const U8 my_operator) {
/* Returns the precedence in the (?[...]) construct of the input operator,
* specified by its character representation. The precedence follows
* general Perl rules, but it extends this so that ')' and ']' have (low)
* precedence even though they aren't really operators */
switch (my_operator) {
case '!':
return 5;
case '&':
return 4;
case '^':
case '|':
case '+':
case '-':
return 3;
case ')':
return 2;
case ']':
return 1;
}
NOT_REACHED; /* NOTREACHED */
return 0; /* Silence compiler warning */
}
STATIC regnode_offset
S_handle_regex_sets(pTHX_ RExC_state_t *pRExC_state, SV** return_invlist,
I32 *flagp, U32 depth,
char * const oregcomp_parse)
{
/* Handle the (?[...]) construct to do set operations */
U8 curchar; /* Current character being parsed */
UV start, end; /* End points of code point ranges */
SV* final = NULL; /* The end result inversion list */
SV* result_string; /* 'final' stringified */
AV* stack; /* stack of operators and operands not yet
resolved */
AV* fence_stack = NULL; /* A stack containing the positions in
'stack' of where the undealt-with left
parens would be if they were actually
put there */
/* The 'volatile' is a workaround for an optimiser bug
* in Solaris Studio 12.3. See RT #127455 */
volatile IV fence = 0; /* Position of where most recent undealt-
with left paren in stack is; -1 if none.
*/
STRLEN len; /* Temporary */
regnode_offset node; /* Temporary, and final regnode returned by
this function */
const bool save_fold = FOLD; /* Temporary */
char *save_end, *save_parse; /* Temporaries */
const bool in_locale = LOC; /* we turn off /l during processing */
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_HANDLE_REGEX_SETS;
DEBUG_PARSE("xcls");
if (in_locale) {
set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET);
}
/* The use of this operator implies /u. This is required so that the
* compile time values are valid in all runtime cases */
REQUIRE_UNI_RULES(flagp, 0);
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__REGEX_SETS,
"The regex_sets feature is experimental");
/* Everything in this construct is a metacharacter. Operands begin with
* either a '\' (for an escape sequence), or a '[' for a bracketed
* character class. Any other character should be an operator, or
* parenthesis for grouping. Both types of operands are handled by calling
* regclass() to parse them. It is called with a parameter to indicate to
* return the computed inversion list. The parsing here is implemented via
* a stack. Each entry on the stack is a single character representing one
* of the operators; or else a pointer to an operand inversion list. */
#define IS_OPERATOR(a) SvIOK(a)
#define IS_OPERAND(a) (! IS_OPERATOR(a))
/* The stack is kept in Łukasiewicz order. (That's pronounced similar
* to luke-a-shave-itch (or -itz), but people who didn't want to bother
* with pronouncing it called it Reverse Polish instead, but now that YOU
* know how to pronounce it you can use the correct term, thus giving due
* credit to the person who invented it, and impressing your geek friends.
* Wikipedia says that the pronounciation of "Ł" has been changing so that
* it is now more like an English initial W (as in wonk) than an L.)
*
* This means that, for example, 'a | b & c' is stored on the stack as
*
* c [4]
* b [3]
* & [2]
* a [1]
* | [0]
*
* where the numbers in brackets give the stack [array] element number.
* In this implementation, parentheses are not stored on the stack.
* Instead a '(' creates a "fence" so that the part of the stack below the
* fence is invisible except to the corresponding ')' (this allows us to
* replace testing for parens, by using instead subtraction of the fence
* position). As new operands are processed they are pushed onto the stack
* (except as noted in the next paragraph). New operators of higher
* precedence than the current final one are inserted on the stack before
* the lhs operand (so that when the rhs is pushed next, everything will be
* in the correct positions shown above. When an operator of equal or
* lower precedence is encountered in parsing, all the stacked operations
* of equal or higher precedence are evaluated, leaving the result as the
* top entry on the stack. This makes higher precedence operations
* evaluate before lower precedence ones, and causes operations of equal
* precedence to left associate.
*
* The only unary operator '!' is immediately pushed onto the stack when
* encountered. When an operand is encountered, if the top of the stack is
* a '!", the complement is immediately performed, and the '!' popped. The
* resulting value is treated as a new operand, and the logic in the
* previous paragraph is executed. Thus in the expression
* [a] + ! [b]
* the stack looks like
*
* !
* a
* +
*
* as 'b' gets parsed, the latter gets evaluated to '!b', and the stack
* becomes
*
* !b
* a
* +
*
* A ')' is treated as an operator with lower precedence than all the
* aforementioned ones, which causes all operations on the stack above the
* corresponding '(' to be evaluated down to a single resultant operand.
* Then the fence for the '(' is removed, and the operand goes through the
* algorithm above, without the fence.
*
* A separate stack is kept of the fence positions, so that the position of
* the latest so-far unbalanced '(' is at the top of it.
*
* The ']' ending the construct is treated as the lowest operator of all,
* so that everything gets evaluated down to a single operand, which is the
* result */
sv_2mortal((SV *)(stack = newAV()));
sv_2mortal((SV *)(fence_stack = newAV()));
while (RExC_parse < RExC_end) {
I32 top_index; /* Index of top-most element in 'stack' */
SV** top_ptr; /* Pointer to top 'stack' element */
SV* current = NULL; /* To contain the current inversion list
operand */
SV* only_to_avoid_leaks;
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
TRUE /* Force /x */ );
if (RExC_parse >= RExC_end) { /* Fail */
break;
}
curchar = UCHARAT(RExC_parse);
redo_curchar:
#ifdef ENABLE_REGEX_SETS_DEBUGGING
/* Enable with -Accflags=-DENABLE_REGEX_SETS_DEBUGGING */
DEBUG_U(dump_regex_sets_structures(pRExC_state,
stack, fence, fence_stack));
#endif
top_index = av_tindex_skip_len_mg(stack);
switch (curchar) {
SV** stacked_ptr; /* Ptr to something already on 'stack' */
char stacked_operator; /* The topmost operator on the 'stack'. */
SV* lhs; /* Operand to the left of the operator */
SV* rhs; /* Operand to the right of the operator */
SV* fence_ptr; /* Pointer to top element of the fence
stack */
case '(':
if ( RExC_parse < RExC_end - 2
&& UCHARAT(RExC_parse + 1) == '?'
&& UCHARAT(RExC_parse + 2) == '^')
{
/* If is a '(?', could be an embedded '(?^flags:(?[...])'.
* This happens when we have some thing like
*
* my $thai_or_lao = qr/(?[ \p{Thai} + \p{Lao} ])/;
* ...
* qr/(?[ \p{Digit} & $thai_or_lao ])/;
*
* Here we would be handling the interpolated
* '$thai_or_lao'. We handle this by a recursive call to
* ourselves which returns the inversion list the
* interpolated expression evaluates to. We use the flags
* from the interpolated pattern. */
U32 save_flags = RExC_flags;
const char * save_parse;
RExC_parse += 2; /* Skip past the '(?' */
save_parse = RExC_parse;
/* Parse the flags for the '(?'. We already know the first
* flag to parse is a '^' */
parse_lparen_question_flags(pRExC_state);
if ( RExC_parse >= RExC_end - 4
|| UCHARAT(RExC_parse) != ':'
|| UCHARAT(++RExC_parse) != '('
|| UCHARAT(++RExC_parse) != '?'
|| UCHARAT(++RExC_parse) != '[')
{
/* In combination with the above, this moves the
* pointer to the point just after the first erroneous
* character. */
if (RExC_parse >= RExC_end - 4) {
RExC_parse = RExC_end;
}
else if (RExC_parse != save_parse) {
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
}
vFAIL("Expecting '(?flags:(?[...'");
}
/* Recurse, with the meat of the embedded expression */
RExC_parse++;
if (! handle_regex_sets(pRExC_state, ¤t, flagp,
depth+1, oregcomp_parse))
{
RETURN_FAIL_ON_RESTART(*flagp, flagp);
}
/* Here, 'current' contains the embedded expression's
* inversion list, and RExC_parse points to the trailing
* ']'; the next character should be the ')' */
RExC_parse++;
if (UCHARAT(RExC_parse) != ')')
vFAIL("Expecting close paren for nested extended charclass");
/* Then the ')' matching the original '(' handled by this
* case: statement */
RExC_parse++;
if (UCHARAT(RExC_parse) != ')')
vFAIL("Expecting close paren for wrapper for nested extended charclass");
RExC_flags = save_flags;
goto handle_operand;
}
/* A regular '('. Look behind for illegal syntax */
if (top_index - fence >= 0) {
/* If the top entry on the stack is an operator, it had
* better be a '!', otherwise the entry below the top
* operand should be an operator */
if ( ! (top_ptr = av_fetch(stack, top_index, FALSE))
|| (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) != '!')
|| ( IS_OPERAND(*top_ptr)
&& ( top_index - fence < 1
|| ! (stacked_ptr = av_fetch(stack,
top_index - 1,
FALSE))
|| ! IS_OPERATOR(*stacked_ptr))))
{
RExC_parse++;
vFAIL("Unexpected '(' with no preceding operator");
}
}
/* Stack the position of this undealt-with left paren */
av_push(fence_stack, newSViv(fence));
fence = top_index + 1;
break;
case '\\':
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if
* multi-char folds are allowed. */
if (!regclass(pRExC_state, flagp, depth+1,
TRUE, /* means parse just the next thing */
FALSE, /* don't allow multi-char folds */
FALSE, /* don't silence non-portable warnings. */
TRUE, /* strict */
FALSE, /* Require return to be an ANYOF */
¤t))
{
RETURN_FAIL_ON_RESTART(*flagp, flagp);
goto regclass_failed;
}
/* regclass() will return with parsing just the \ sequence,
* leaving the parse pointer at the next thing to parse */
RExC_parse--;
goto handle_operand;
case '[': /* Is a bracketed character class */
{
/* See if this is a [:posix:] class. */
bool is_posix_class = (OOB_NAMEDCLASS
< handle_possible_posix(pRExC_state,
RExC_parse + 1,
NULL,
NULL,
TRUE /* checking only */));
/* If it is a posix class, leave the parse pointer at the '['
* to fool regclass() into thinking it is part of a
* '[[:posix:]]'. */
if (! is_posix_class) {
RExC_parse++;
}
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if
* multi-char folds are allowed. */
if (!regclass(pRExC_state, flagp, depth+1,
is_posix_class, /* parse the whole char
class only if not a
posix class */
FALSE, /* don't allow multi-char folds */
TRUE, /* silence non-portable warnings. */
TRUE, /* strict */
FALSE, /* Require return to be an ANYOF */
¤t))
{
RETURN_FAIL_ON_RESTART(*flagp, flagp);
goto regclass_failed;
}
if (! current) {
break;
}
/* function call leaves parse pointing to the ']', except if we
* faked it */
if (is_posix_class) {
RExC_parse--;
}
goto handle_operand;
}
case ']':
if (top_index >= 1) {
goto join_operators;
}
/* Only a single operand on the stack: are done */
goto done;
case ')':
if (av_tindex_skip_len_mg(fence_stack) < 0) {
if (UCHARAT(RExC_parse - 1) == ']') {
break;
}
RExC_parse++;
vFAIL("Unexpected ')'");
}
/* If nothing after the fence, is missing an operand */
if (top_index - fence < 0) {
RExC_parse++;
goto bad_syntax;
}
/* If at least two things on the stack, treat this as an
* operator */
if (top_index - fence >= 1) {
goto join_operators;
}
/* Here only a single thing on the fenced stack, and there is a
* fence. Get rid of it */
fence_ptr = av_pop(fence_stack);
assert(fence_ptr);
fence = SvIV(fence_ptr);
SvREFCNT_dec_NN(fence_ptr);
fence_ptr = NULL;
if (fence < 0) {
fence = 0;
}
/* Having gotten rid of the fence, we pop the operand at the
* stack top and process it as a newly encountered operand */
current = av_pop(stack);
if (IS_OPERAND(current)) {
goto handle_operand;
}
RExC_parse++;
goto bad_syntax;
case '&':
case '|':
case '+':
case '-':
case '^':
/* These binary operators should have a left operand already
* parsed */
if ( top_index - fence < 0
|| top_index - fence == 1
|| ( ! (top_ptr = av_fetch(stack, top_index, FALSE)))
|| ! IS_OPERAND(*top_ptr))
{
goto unexpected_binary;
}
/* If only the one operand is on the part of the stack visible
* to us, we just place this operator in the proper position */
if (top_index - fence < 2) {
/* Place the operator before the operand */
SV* lhs = av_pop(stack);
av_push(stack, newSVuv(curchar));
av_push(stack, lhs);
break;
}
/* But if there is something else on the stack, we need to
* process it before this new operator if and only if the
* stacked operation has equal or higher precedence than the
* new one */
join_operators:
/* The operator on the stack is supposed to be below both its
* operands */
if ( ! (stacked_ptr = av_fetch(stack, top_index - 2, FALSE))
|| IS_OPERAND(*stacked_ptr))
{
/* But if not, it's legal and indicates we are completely
* done if and only if we're currently processing a ']',
* which should be the final thing in the expression */
if (curchar == ']') {
goto done;
}
unexpected_binary:
RExC_parse++;
vFAIL2("Unexpected binary operator '%c' with no "
"preceding operand", curchar);
}
stacked_operator = (char) SvUV(*stacked_ptr);
if (regex_set_precedence(curchar)
> regex_set_precedence(stacked_operator))
{
/* Here, the new operator has higher precedence than the
* stacked one. This means we need to add the new one to
* the stack to await its rhs operand (and maybe more
* stuff). We put it before the lhs operand, leaving
* untouched the stacked operator and everything below it
* */
lhs = av_pop(stack);
assert(IS_OPERAND(lhs));
av_push(stack, newSVuv(curchar));
av_push(stack, lhs);
break;
}
/* Here, the new operator has equal or lower precedence than
* what's already there. This means the operation already
* there should be performed now, before the new one. */
rhs = av_pop(stack);
if (! IS_OPERAND(rhs)) {
/* This can happen when a ! is not followed by an operand,
* like in /(?[\t &!])/ */
goto bad_syntax;
}
lhs = av_pop(stack);
if (! IS_OPERAND(lhs)) {
/* This can happen when there is an empty (), like in
* /(?[[0]+()+])/ */
goto bad_syntax;
}
switch (stacked_operator) {
case '&':
_invlist_intersection(lhs, rhs, &rhs);
break;
case '|':
case '+':
_invlist_union(lhs, rhs, &rhs);
break;
case '-':
_invlist_subtract(lhs, rhs, &rhs);
break;
case '^': /* The union minus the intersection */
{
SV* i = NULL;
SV* u = NULL;
_invlist_union(lhs, rhs, &u);
_invlist_intersection(lhs, rhs, &i);
_invlist_subtract(u, i, &rhs);
SvREFCNT_dec_NN(i);
SvREFCNT_dec_NN(u);
break;
}
}
SvREFCNT_dec(lhs);
/* Here, the higher precedence operation has been done, and the
* result is in 'rhs'. We overwrite the stacked operator with
* the result. Then we redo this code to either push the new
* operator onto the stack or perform any higher precedence
* stacked operation */
only_to_avoid_leaks = av_pop(stack);
SvREFCNT_dec(only_to_avoid_leaks);
av_push(stack, rhs);
goto redo_curchar;
case '!': /* Highest priority, right associative */
/* If what's already at the top of the stack is another '!",
* they just cancel each other out */
if ( (top_ptr = av_fetch(stack, top_index, FALSE))
&& (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) == '!'))
{
only_to_avoid_leaks = av_pop(stack);
SvREFCNT_dec(only_to_avoid_leaks);
}
else { /* Otherwise, since it's right associative, just push
onto the stack */
av_push(stack, newSVuv(curchar));
}
break;
default:
RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1;
if (RExC_parse >= RExC_end) {
break;
}
vFAIL("Unexpected character");
handle_operand:
/* Here 'current' is the operand. If something is already on the
* stack, we have to check if it is a !. But first, the code above
* may have altered the stack in the time since we earlier set
* 'top_index'. */
top_index = av_tindex_skip_len_mg(stack);
if (top_index - fence >= 0) {
/* If the top entry on the stack is an operator, it had better
* be a '!', otherwise the entry below the top operand should
* be an operator */
top_ptr = av_fetch(stack, top_index, FALSE);
assert(top_ptr);
if (IS_OPERATOR(*top_ptr)) {
/* The only permissible operator at the top of the stack is
* '!', which is applied immediately to this operand. */
curchar = (char) SvUV(*top_ptr);
if (curchar != '!') {
SvREFCNT_dec(current);
vFAIL2("Unexpected binary operator '%c' with no "
"preceding operand", curchar);
}
_invlist_invert(current);
only_to_avoid_leaks = av_pop(stack);
SvREFCNT_dec(only_to_avoid_leaks);
/* And we redo with the inverted operand. This allows
* handling multiple ! in a row */
goto handle_operand;
}
/* Single operand is ok only for the non-binary ')'
* operator */
else if ((top_index - fence == 0 && curchar != ')')
|| (top_index - fence > 0
&& (! (stacked_ptr = av_fetch(stack,
top_index - 1,
FALSE))
|| IS_OPERAND(*stacked_ptr))))
{
SvREFCNT_dec(current);
vFAIL("Operand with no preceding operator");
}
}
/* Here there was nothing on the stack or the top element was
* another operand. Just add this new one */
av_push(stack, current);
} /* End of switch on next parse token */
RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1;
} /* End of loop parsing through the construct */
vFAIL("Syntax error in (?[...])");
done:
if (RExC_parse >= RExC_end || RExC_parse[1] != ')') {
if (RExC_parse < RExC_end) {
RExC_parse++;
}
vFAIL("Unexpected ']' with no following ')' in (?[...");
}
if (av_tindex_skip_len_mg(fence_stack) >= 0) {
vFAIL("Unmatched (");
}
if (av_tindex_skip_len_mg(stack) < 0 /* Was empty */
|| ((final = av_pop(stack)) == NULL)
|| ! IS_OPERAND(final)
|| ! is_invlist(final)
|| av_tindex_skip_len_mg(stack) >= 0) /* More left on stack */
{
bad_syntax:
SvREFCNT_dec(final);
vFAIL("Incomplete expression within '(?[ ])'");
}
/* Here, 'final' is the resultant inversion list from evaluating the
* expression. Return it if so requested */
if (return_invlist) {
*return_invlist = final;
return END;
}
/* Otherwise generate a resultant node, based on 'final'. regclass() is
* expecting a string of ranges and individual code points */
invlist_iterinit(final);
result_string = newSVpvs("");
while (invlist_iternext(final, &start, &end)) {
if (start == end) {
Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}", start);
}
else {
Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}-\\x{%" UVXf "}",
start, end);
}
}
/* About to generate an ANYOF (or similar) node from the inversion list we
* have calculated */
save_parse = RExC_parse;
RExC_parse = SvPV(result_string, len);
save_end = RExC_end;
RExC_end = RExC_parse + len;
TURN_OFF_WARNINGS_IN_SUBSTITUTE_PARSE;
/* We turn off folding around the call, as the class we have constructed
* already has all folding taken into consideration, and we don't want
* regclass() to add to that */
RExC_flags &= ~RXf_PMf_FOLD;
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if multi-char
* folds are allowed. */
node = regclass(pRExC_state, flagp, depth+1,
FALSE, /* means parse the whole char class */
FALSE, /* don't allow multi-char folds */
TRUE, /* silence non-portable warnings. The above may very
well have generated non-portable code points, but
they're valid on this machine */
FALSE, /* similarly, no need for strict */
FALSE, /* Require return to be an ANYOF */
NULL
);
RESTORE_WARNINGS;
RExC_parse = save_parse + 1;
RExC_end = save_end;
SvREFCNT_dec_NN(final);
SvREFCNT_dec_NN(result_string);
if (save_fold) {
RExC_flags |= RXf_PMf_FOLD;
}
if (!node) {
RETURN_FAIL_ON_RESTART(*flagp, flagp);
goto regclass_failed;
}
/* Fix up the node type if we are in locale. (We have pretended we are
* under /u for the purposes of regclass(), as this construct will only
* work under UTF-8 locales. But now we change the opcode to be ANYOFL (so
* as to cause any warnings about bad locales to be output in regexec.c),
* and add the flag that indicates to check if not in a UTF-8 locale. The
* reason we above forbid optimization into something other than an ANYOF
* node is simply to minimize the number of code changes in regexec.c.
* Otherwise we would have to create new EXACTish node types and deal with
* them. This decision could be revisited should this construct become
* popular.
*
* (One might think we could look at the resulting ANYOF node and suppress
* the flag if everything is above 255, as those would be UTF-8 only,
* but this isn't true, as the components that led to that result could
* have been locale-affected, and just happen to cancel each other out
* under UTF-8 locales.) */
if (in_locale) {
set_regex_charset(&RExC_flags, REGEX_LOCALE_CHARSET);
assert(OP(REGNODE_p(node)) == ANYOF);
OP(REGNODE_p(node)) = ANYOFL;
ANYOF_FLAGS(REGNODE_p(node))
|= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
nextchar(pRExC_state);
Set_Node_Length(REGNODE_p(node), RExC_parse - oregcomp_parse + 1); /* MJD */
return node;
regclass_failed:
FAIL2("panic: regclass returned failure to handle_sets, " "flags=%#" UVxf,
(UV) *flagp);
}
#ifdef ENABLE_REGEX_SETS_DEBUGGING
STATIC void
S_dump_regex_sets_structures(pTHX_ RExC_state_t *pRExC_state,
AV * stack, const IV fence, AV * fence_stack)
{ /* Dumps the stacks in handle_regex_sets() */
const SSize_t stack_top = av_tindex_skip_len_mg(stack);
const SSize_t fence_stack_top = av_tindex_skip_len_mg(fence_stack);
SSize_t i;
PERL_ARGS_ASSERT_DUMP_REGEX_SETS_STRUCTURES;
PerlIO_printf(Perl_debug_log, "\nParse position is:%s\n", RExC_parse);
if (stack_top < 0) {
PerlIO_printf(Perl_debug_log, "Nothing on stack\n");
}
else {
PerlIO_printf(Perl_debug_log, "Stack: (fence=%d)\n", (int) fence);
for (i = stack_top; i >= 0; i--) {
SV ** element_ptr = av_fetch(stack, i, FALSE);
if (! element_ptr) {
}
if (IS_OPERATOR(*element_ptr)) {
PerlIO_printf(Perl_debug_log, "[%d]: %c\n",
(int) i, (int) SvIV(*element_ptr));
}
else {
PerlIO_printf(Perl_debug_log, "[%d] ", (int) i);
sv_dump(*element_ptr);
}
}
}
if (fence_stack_top < 0) {
PerlIO_printf(Perl_debug_log, "Nothing on fence_stack\n");
}
else {
PerlIO_printf(Perl_debug_log, "Fence_stack: \n");
for (i = fence_stack_top; i >= 0; i--) {
SV ** element_ptr = av_fetch(fence_stack, i, FALSE);
if (! element_ptr) {
}
PerlIO_printf(Perl_debug_log, "[%d]: %d\n",
(int) i, (int) SvIV(*element_ptr));
}
}
}
#endif
#undef IS_OPERATOR
#undef IS_OPERAND
STATIC void
S_add_above_Latin1_folds(pTHX_ RExC_state_t *pRExC_state, const U8 cp, SV** invlist)
{
/* This adds the Latin1/above-Latin1 folding rules.
*
* This should be called only for a Latin1-range code points, cp, which is
* known to be involved in a simple fold with other code points above
* Latin1. It would give false results if /aa has been specified.
* Multi-char folds are outside the scope of this, and must be handled
* specially. */
PERL_ARGS_ASSERT_ADD_ABOVE_LATIN1_FOLDS;
assert(HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(cp));
/* The rules that are valid for all Unicode versions are hard-coded in */
switch (cp) {
case 'k':
case 'K':
*invlist =
add_cp_to_invlist(*invlist, KELVIN_SIGN);
break;
case 's':
case 'S':
*invlist = add_cp_to_invlist(*invlist, LATIN_SMALL_LETTER_LONG_S);
break;
case MICRO_SIGN:
*invlist = add_cp_to_invlist(*invlist, GREEK_CAPITAL_LETTER_MU);
*invlist = add_cp_to_invlist(*invlist, GREEK_SMALL_LETTER_MU);
break;
case LATIN_CAPITAL_LETTER_A_WITH_RING_ABOVE:
case LATIN_SMALL_LETTER_A_WITH_RING_ABOVE:
*invlist = add_cp_to_invlist(*invlist, ANGSTROM_SIGN);
break;
case LATIN_SMALL_LETTER_Y_WITH_DIAERESIS:
*invlist = add_cp_to_invlist(*invlist,
LATIN_CAPITAL_LETTER_Y_WITH_DIAERESIS);
break;
default: /* Other code points are checked against the data for the
current Unicode version */
{
Size_t folds_count;
unsigned int first_fold;
const unsigned int * remaining_folds;
UV folded_cp;
if (isASCII(cp)) {
folded_cp = toFOLD(cp);
}
else {
U8 dummy_fold[UTF8_MAXBYTES_CASE+1];
Size_t dummy_len;
folded_cp = _to_fold_latin1(cp, dummy_fold, &dummy_len, 0);
}
if (folded_cp > 255) {
*invlist = add_cp_to_invlist(*invlist, folded_cp);
}
folds_count = _inverse_folds(folded_cp, &first_fold,
&remaining_folds);
if (folds_count == 0) {
/* Use deprecated warning to increase the chances of this being
* output */
ckWARN2reg_d(RExC_parse,
"Perl folding rules are not up-to-date for 0x%02X;"
" please use the perlbug utility to report;", cp);
}
else {
unsigned int i;
if (first_fold > 255) {
*invlist = add_cp_to_invlist(*invlist, first_fold);
}
for (i = 0; i < folds_count - 1; i++) {
if (remaining_folds[i] > 255) {
*invlist = add_cp_to_invlist(*invlist,
remaining_folds[i]);
}
}
}
break;
}
}
}
STATIC void
S_output_posix_warnings(pTHX_ RExC_state_t *pRExC_state, AV* posix_warnings)
{
/* Output the elements of the array given by '*posix_warnings' as REGEXP
* warnings. */
SV * msg;
const bool first_is_fatal = ckDEAD(packWARN(WARN_REGEXP));
PERL_ARGS_ASSERT_OUTPUT_POSIX_WARNINGS;
if (! TO_OUTPUT_WARNINGS(RExC_parse)) {
return;
}
while ((msg = av_shift(posix_warnings)) != &PL_sv_undef) {
if (first_is_fatal) { /* Avoid leaking this */
av_undef(posix_warnings); /* This isn't necessary if the
array is mortal, but is a
fail-safe */
(void) sv_2mortal(msg);
PREPARE_TO_DIE;
}
Perl_warner(aTHX_ packWARN(WARN_REGEXP), "%s", SvPVX(msg));
SvREFCNT_dec_NN(msg);
}
UPDATE_WARNINGS_LOC(RExC_parse);
}
STATIC AV *
S_add_multi_match(pTHX_ AV* multi_char_matches, SV* multi_string, const STRLEN cp_count)
{
/* This adds the string scalar <multi_string> to the array
* <multi_char_matches>. <multi_string> is known to have exactly
* <cp_count> code points in it. This is used when constructing a
* bracketed character class and we find something that needs to match more
* than a single character.
*
* <multi_char_matches> is actually an array of arrays. Each top-level
* element is an array that contains all the strings known so far that are
* the same length. And that length (in number of code points) is the same
* as the index of the top-level array. Hence, the [2] element is an
* array, each element thereof is a string containing TWO code points;
* while element [3] is for strings of THREE characters, and so on. Since
* this is for multi-char strings there can never be a [0] nor [1] element.
*
* When we rewrite the character class below, we will do so such that the
* longest strings are written first, so that it prefers the longest
* matching strings first. This is done even if it turns out that any
* quantifier is non-greedy, out of this programmer's (khw) laziness. Tom
* Christiansen has agreed that this is ok. This makes the test for the
* ligature 'ffi' come before the test for 'ff', for example */
AV* this_array;
AV** this_array_ptr;
PERL_ARGS_ASSERT_ADD_MULTI_MATCH;
if (! multi_char_matches) {
multi_char_matches = newAV();
}
if (av_exists(multi_char_matches, cp_count)) {
this_array_ptr = (AV**) av_fetch(multi_char_matches, cp_count, FALSE);
this_array = *this_array_ptr;
}
else {
this_array = newAV();
av_store(multi_char_matches, cp_count,
(SV*) this_array);
}
av_push(this_array, multi_string);
return multi_char_matches;
}
/* The names of properties whose definitions are not known at compile time are
* stored in this SV, after a constant heading. So if the length has been
* changed since initialization, then there is a run-time definition. */
#define HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION \
(SvCUR(listsv) != initial_listsv_len)
/* There is a restricted set of white space characters that are legal when
* ignoring white space in a bracketed character class. This generates the
* code to skip them.
*
* There is a line below that uses the same white space criteria but is outside
* this macro. Both here and there must use the same definition */
#define SKIP_BRACKETED_WHITE_SPACE(do_skip, p) \
STMT_START { \
if (do_skip) { \
while (isBLANK_A(UCHARAT(p))) \
{ \
p++; \
} \
} \
} STMT_END
STATIC regnode_offset
S_regclass(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth,
const bool stop_at_1, /* Just parse the next thing, don't
look for a full character class */
bool allow_mutiple_chars,
const bool silence_non_portable, /* Don't output warnings
about too large
characters */
const bool strict,
bool optimizable, /* ? Allow a non-ANYOF return
node */
SV** ret_invlist /* Return an inversion list, not a node */
)
{
/* parse a bracketed class specification. Most of these will produce an
* ANYOF node; but something like [a] will produce an EXACT node; [aA], an
* EXACTFish node; [[:ascii:]], a POSIXA node; etc. It is more complex
* under /i with multi-character folds: it will be rewritten following the
* paradigm of this example, where the <multi-fold>s are characters which
* fold to multiple character sequences:
* /[abc\x{multi-fold1}def\x{multi-fold2}ghi]/i
* gets effectively rewritten as:
* /(?:\x{multi-fold1}|\x{multi-fold2}|[abcdefghi]/i
* reg() gets called (recursively) on the rewritten version, and this
* function will return what it constructs. (Actually the <multi-fold>s
* aren't physically removed from the [abcdefghi], it's just that they are
* ignored in the recursion by means of a flag:
* <RExC_in_multi_char_class>.)
*
* ANYOF nodes contain a bit map for the first NUM_ANYOF_CODE_POINTS
* characters, with the corresponding bit set if that character is in the
* list. For characters above this, an inversion list is used. There
* are extra bits for \w, etc. in locale ANYOFs, as what these match is not
* determinable at compile time
*
* On success, returns the offset at which any next node should be placed
* into the regex engine program being compiled.
*
* Returns 0 otherwise, setting flagp to RESTART_PARSE if the parse needs
* to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to
* UTF-8
*/
dVAR;
UV prevvalue = OOB_UNICODE, save_prevvalue = OOB_UNICODE;
IV range = 0;
UV value = OOB_UNICODE, save_value = OOB_UNICODE;
regnode_offset ret = -1; /* Initialized to an illegal value */
STRLEN numlen;
int namedclass = OOB_NAMEDCLASS;
char *rangebegin = NULL;
SV *listsv = NULL; /* List of \p{user-defined} whose definitions
aren't available at the time this was called */
STRLEN initial_listsv_len = 0; /* Kind of a kludge to see if it is more
than just initialized. */
SV* properties = NULL; /* Code points that match \p{} \P{} */
SV* posixes = NULL; /* Code points that match classes like [:word:],
extended beyond the Latin1 range. These have to
be kept separate from other code points for much
of this function because their handling is
different under /i, and for most classes under
/d as well */
SV* nposixes = NULL; /* Similarly for [:^word:]. These are kept
separate for a while from the non-complemented
versions because of complications with /d
matching */
SV* simple_posixes = NULL; /* But under some conditions, the classes can be
treated more simply than the general case,
leading to less compilation and execution
work */
UV element_count = 0; /* Number of distinct elements in the class.
Optimizations may be possible if this is tiny */
AV * multi_char_matches = NULL; /* Code points that fold to more than one
character; used under /i */
UV n;
char * stop_ptr = RExC_end; /* where to stop parsing */
/* ignore unescaped whitespace? */
const bool skip_white = cBOOL( ret_invlist
|| (RExC_flags & RXf_PMf_EXTENDED_MORE));
/* inversion list of code points this node matches only when the target
* string is in UTF-8. These are all non-ASCII, < 256. (Because is under
* /d) */
SV* upper_latin1_only_utf8_matches = NULL;
/* Inversion list of code points this node matches regardless of things
* like locale, folding, utf8ness of the target string */
SV* cp_list = NULL;
/* Like cp_list, but code points on this list need to be checked for things
* that fold to/from them under /i */
SV* cp_foldable_list = NULL;
/* Like cp_list, but code points on this list are valid only when the
* runtime locale is UTF-8 */
SV* only_utf8_locale_list = NULL;
/* In a range, if one of the endpoints is non-character-set portable,
* meaning that it hard-codes a code point that may mean a different
* charactger in ASCII vs. EBCDIC, as opposed to, say, a literal 'A' or a
* mnemonic '\t' which each mean the same character no matter which
* character set the platform is on. */
unsigned int non_portable_endpoint = 0;
/* Is the range unicode? which means on a platform that isn't 1-1 native
* to Unicode (i.e. non-ASCII), each code point in it should be considered
* to be a Unicode value. */
bool unicode_range = FALSE;
bool invert = FALSE; /* Is this class to be complemented */
bool warn_super = ALWAYS_WARN_SUPER;
const char * orig_parse = RExC_parse;
/* This variable is used to mark where the end in the input is of something
* that looks like a POSIX construct but isn't. During the parse, when
* something looks like it could be such a construct is encountered, it is
* checked for being one, but not if we've already checked this area of the
* input. Only after this position is reached do we check again */
char *not_posix_region_end = RExC_parse - 1;
AV* posix_warnings = NULL;
const bool do_posix_warnings = ckWARN(WARN_REGEXP);
U8 op = END; /* The returned node-type, initialized to an impossible
one. */
U8 anyof_flags = 0; /* flag bits if the node is an ANYOF-type */
U32 posixl = 0; /* bit field of posix classes matched under /l */
/* Flags as to what things aren't knowable until runtime. (Note that these are
* mutually exclusive.) */
#define HAS_USER_DEFINED_PROPERTY 0x01 /* /u any user-defined properties that
haven't been defined as of yet */
#define HAS_D_RUNTIME_DEPENDENCY 0x02 /* /d if the target being matched is
UTF-8 or not */
#define HAS_L_RUNTIME_DEPENDENCY 0x04 /* /l what the posix classes match and
what gets folded */
U32 has_runtime_dependency = 0; /* OR of the above flags */
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGCLASS;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
/* If wants an inversion list returned, we can't optimize to something
* else. */
if (ret_invlist) {
optimizable = FALSE;
}
DEBUG_PARSE("clas");
#if UNICODE_MAJOR_VERSION < 3 /* no multifolds in early Unicode */ \
|| (UNICODE_MAJOR_VERSION == 3 && UNICODE_DOT_VERSION == 0 \
&& UNICODE_DOT_DOT_VERSION == 0)
allow_mutiple_chars = FALSE;
#endif
/* We include the /i status at the beginning of this so that we can
* know it at runtime */
listsv = sv_2mortal(Perl_newSVpvf(aTHX_ "#%d\n", cBOOL(FOLD)));
initial_listsv_len = SvCUR(listsv);
SvTEMP_off(listsv); /* Grr, TEMPs and mortals are conflated. */
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
assert(RExC_parse <= RExC_end);
if (UCHARAT(RExC_parse) == '^') { /* Complement the class */
RExC_parse++;
invert = TRUE;
allow_mutiple_chars = FALSE;
MARK_NAUGHTY(1);
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
}
/* Check that they didn't say [:posix:] instead of [[:posix:]] */
if (! ret_invlist && MAYBE_POSIXCC(UCHARAT(RExC_parse))) {
int maybe_class = handle_possible_posix(pRExC_state,
RExC_parse,
¬_posix_region_end,
NULL,
TRUE /* checking only */);
if (maybe_class >= OOB_NAMEDCLASS && do_posix_warnings) {
ckWARN4reg(not_posix_region_end,
"POSIX syntax [%c %c] belongs inside character classes%s",
*RExC_parse, *RExC_parse,
(maybe_class == OOB_NAMEDCLASS)
? ((POSIXCC_NOTYET(*RExC_parse))
? " (but this one isn't implemented)"
: " (but this one isn't fully valid)")
: ""
);
}
}
/* If the caller wants us to just parse a single element, accomplish this
* by faking the loop ending condition */
if (stop_at_1 && RExC_end > RExC_parse) {
stop_ptr = RExC_parse + 1;
}
/* allow 1st char to be ']' (allowing it to be '-' is dealt with later) */
if (UCHARAT(RExC_parse) == ']')
goto charclassloop;
while (1) {
if ( posix_warnings
&& av_tindex_skip_len_mg(posix_warnings) >= 0
&& RExC_parse > not_posix_region_end)
{
/* Warnings about posix class issues are considered tentative until
* we are far enough along in the parse that we can no longer
* change our mind, at which point we output them. This is done
* each time through the loop so that a later class won't zap them
* before they have been dealt with. */
output_posix_warnings(pRExC_state, posix_warnings);
}
if (RExC_parse >= stop_ptr) {
break;
}
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
if (UCHARAT(RExC_parse) == ']') {
break;
}
charclassloop:
namedclass = OOB_NAMEDCLASS; /* initialize as illegal */
save_value = value;
save_prevvalue = prevvalue;
if (!range) {
rangebegin = RExC_parse;
element_count++;
non_portable_endpoint = 0;
}
if (UTF && ! UTF8_IS_INVARIANT(* RExC_parse)) {
value = utf8n_to_uvchr((U8*)RExC_parse,
RExC_end - RExC_parse,
&numlen, UTF8_ALLOW_DEFAULT);
RExC_parse += numlen;
}
else
value = UCHARAT(RExC_parse++);
if (value == '[') {
char * posix_class_end;
namedclass = handle_possible_posix(pRExC_state,
RExC_parse,
&posix_class_end,
do_posix_warnings ? &posix_warnings : NULL,
FALSE /* die if error */);
if (namedclass > OOB_NAMEDCLASS) {
/* If there was an earlier attempt to parse this particular
* posix class, and it failed, it was a false alarm, as this
* successful one proves */
if ( posix_warnings
&& av_tindex_skip_len_mg(posix_warnings) >= 0
&& not_posix_region_end >= RExC_parse
&& not_posix_region_end <= posix_class_end)
{
av_undef(posix_warnings);
}
RExC_parse = posix_class_end;
}
else if (namedclass == OOB_NAMEDCLASS) {
not_posix_region_end = posix_class_end;
}
else {
namedclass = OOB_NAMEDCLASS;
}
}
else if ( RExC_parse - 1 > not_posix_region_end
&& MAYBE_POSIXCC(value))
{
(void) handle_possible_posix(
pRExC_state,
RExC_parse - 1, /* -1 because parse has already been
advanced */
¬_posix_region_end,
do_posix_warnings ? &posix_warnings : NULL,
TRUE /* checking only */);
}
else if ( strict && ! skip_white
&& ( _generic_isCC(value, _CC_VERTSPACE)
|| is_VERTWS_cp_high(value)))
{
vFAIL("Literal vertical space in [] is illegal except under /x");
}
else if (value == '\\') {
/* Is a backslash; get the code point of the char after it */
if (RExC_parse >= RExC_end) {
vFAIL("Unmatched [");
}
if (UTF && ! UTF8_IS_INVARIANT(UCHARAT(RExC_parse))) {
value = utf8n_to_uvchr((U8*)RExC_parse,
RExC_end - RExC_parse,
&numlen, UTF8_ALLOW_DEFAULT);
RExC_parse += numlen;
}
else
value = UCHARAT(RExC_parse++);
/* Some compilers cannot handle switching on 64-bit integer
* values, therefore value cannot be an UV. Yes, this will
* be a problem later if we want switch on Unicode.
* A similar issue a little bit later when switching on
* namedclass. --jhi */
/* If the \ is escaping white space when white space is being
* skipped, it means that that white space is wanted literally, and
* is already in 'value'. Otherwise, need to translate the escape
* into what it signifies. */
if (! skip_white || ! isBLANK_A(value)) switch ((I32)value) {
case 'w': namedclass = ANYOF_WORDCHAR; break;
case 'W': namedclass = ANYOF_NWORDCHAR; break;
case 's': namedclass = ANYOF_SPACE; break;
case 'S': namedclass = ANYOF_NSPACE; break;
case 'd': namedclass = ANYOF_DIGIT; break;
case 'D': namedclass = ANYOF_NDIGIT; break;
case 'v': namedclass = ANYOF_VERTWS; break;
case 'V': namedclass = ANYOF_NVERTWS; break;
case 'h': namedclass = ANYOF_HORIZWS; break;
case 'H': namedclass = ANYOF_NHORIZWS; break;
case 'N': /* Handle \N{NAME} in class */
{
const char * const backslash_N_beg = RExC_parse - 2;
int cp_count;
if (! grok_bslash_N(pRExC_state,
NULL, /* No regnode */
&value, /* Yes single value */
&cp_count, /* Multiple code pt count */
flagp,
strict,
depth)
) {
if (*flagp & NEED_UTF8)
FAIL("panic: grok_bslash_N set NEED_UTF8");
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
if (cp_count < 0) {
vFAIL("\\N in a character class must be a named character: \\N{...}");
}
else if (cp_count == 0) {
ckWARNreg(RExC_parse,
"Ignoring zero length \\N{} in character class");
}
else { /* cp_count > 1 */
assert(cp_count > 1);
if (! RExC_in_multi_char_class) {
if ( ! allow_mutiple_chars
|| invert
|| range
|| *RExC_parse == '-')
{
if (strict) {
RExC_parse--;
vFAIL("\\N{} in inverted character class or as a range end-point is restricted to one character");
}
ckWARNreg(RExC_parse, "Using just the first character returned by \\N{} in character class");
break; /* <value> contains the first code
point. Drop out of the switch to
process it */
}
else {
SV * multi_char_N = newSVpvn(backslash_N_beg,
RExC_parse - backslash_N_beg);
multi_char_matches
= add_multi_match(multi_char_matches,
multi_char_N,
cp_count);
}
}
} /* End of cp_count != 1 */
/* This element should not be processed further in this
* class */
element_count--;
value = save_value;
prevvalue = save_prevvalue;
continue; /* Back to top of loop to get next char */
}
/* Here, is a single code point, and <value> contains it */
unicode_range = TRUE; /* \N{} are Unicode */
}
break;
case 'p':
case 'P':
{
char *e;
/* \p means they want Unicode semantics */
REQUIRE_UNI_RULES(flagp, 0);
if (RExC_parse >= RExC_end)
vFAIL2("Empty \\%c", (U8)value);
if (*RExC_parse == '{') {
const U8 c = (U8)value;
e = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
if (!e) {
RExC_parse++;
vFAIL2("Missing right brace on \\%c{}", c);
}
RExC_parse++;
/* White space is allowed adjacent to the braces and after
* any '^', even when not under /x */
while (isSPACE(*RExC_parse)) {
RExC_parse++;
}
if (UCHARAT(RExC_parse) == '^') {
/* toggle. (The rhs xor gets the single bit that
* differs between P and p; the other xor inverts just
* that bit) */
value ^= 'P' ^ 'p';
RExC_parse++;
while (isSPACE(*RExC_parse)) {
RExC_parse++;
}
}
if (e == RExC_parse)
vFAIL2("Empty \\%c{}", c);
n = e - RExC_parse;
while (isSPACE(*(RExC_parse + n - 1)))
n--;
} /* The \p isn't immediately followed by a '{' */
else if (! isALPHA(*RExC_parse)) {
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL2("Character following \\%c must be '{' or a "
"single-character Unicode property name",
(U8) value);
}
else {
e = RExC_parse;
n = 1;
}
{
char* name = RExC_parse;
/* Any message returned about expanding the definition */
SV* msg = newSVpvs_flags("", SVs_TEMP);
/* If set TRUE, the property is user-defined as opposed to
* official Unicode */
bool user_defined = FALSE;
SV * prop_definition = parse_uniprop_string(
name, n, UTF, FOLD,
FALSE, /* This is compile-time */
/* We can't defer this defn when
* the full result is required in
* this call */
! cBOOL(ret_invlist),
&user_defined,
msg,
0 /* Base level */
);
if (SvCUR(msg)) { /* Assumes any error causes a msg */
assert(prop_definition == NULL);
RExC_parse = e + 1;
if (SvUTF8(msg)) { /* msg being UTF-8 makes the whole
thing so, or else the display is
mojibake */
RExC_utf8 = TRUE;
}
/* diag_listed_as: Can't find Unicode property definition "%s" in regex; marked by <-- HERE in m/%s/ */
vFAIL2utf8f("%" UTF8f, UTF8fARG(SvUTF8(msg),
SvCUR(msg), SvPVX(msg)));
}
if (! is_invlist(prop_definition)) {
/* Here, the definition isn't known, so we have gotten
* returned a string that will be evaluated if and when
* encountered at runtime. We add it to the list of
* such properties, along with whether it should be
* complemented or not */
if (value == 'P') {
sv_catpvs(listsv, "!");
}
else {
sv_catpvs(listsv, "+");
}
sv_catsv(listsv, prop_definition);
has_runtime_dependency |= HAS_USER_DEFINED_PROPERTY;
/* We don't know yet what this matches, so have to flag
* it */
anyof_flags |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP;
}
else {
assert (prop_definition && is_invlist(prop_definition));
/* Here we do have the complete property definition
*
* Temporary workaround for [perl #133136]. For this
* precise input that is in the .t that is failing,
* load utf8.pm, which is what the test wants, so that
* that .t passes */
if ( memEQs(RExC_start, e + 1 - RExC_start,
"foo\\p{Alnum}")
&& ! hv_common(GvHVn(PL_incgv),
NULL,
"utf8.pm", sizeof("utf8.pm") - 1,
0, HV_FETCH_ISEXISTS, NULL, 0))
{
require_pv("utf8.pm");
}
if (! user_defined &&
/* We warn on matching an above-Unicode code point
* if the match would return true, except don't
* warn for \p{All}, which has exactly one element
* = 0 */
(_invlist_contains_cp(prop_definition, 0x110000)
&& (! (_invlist_len(prop_definition) == 1
&& *invlist_array(prop_definition) == 0))))
{
warn_super = TRUE;
}
/* Invert if asking for the complement */
if (value == 'P') {
_invlist_union_complement_2nd(properties,
prop_definition,
&properties);
}
else {
_invlist_union(properties, prop_definition, &properties);
}
}
}
RExC_parse = e + 1;
namedclass = ANYOF_UNIPROP; /* no official name, but it's
named */
}
break;
case 'n': value = '\n'; break;
case 'r': value = '\r'; break;
case 't': value = '\t'; break;
case 'f': value = '\f'; break;
case 'b': value = '\b'; break;
case 'e': value = ESC_NATIVE; break;
case 'a': value = '\a'; break;
case 'o':
RExC_parse--; /* function expects to be pointed at the 'o' */
{
const char* error_msg;
bool valid = grok_bslash_o(&RExC_parse,
RExC_end,
&value,
&error_msg,
TO_OUTPUT_WARNINGS(RExC_parse),
strict,
silence_non_portable,
UTF);
if (! valid) {
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(RExC_parse - 1);
}
non_portable_endpoint++;
break;
case 'x':
RExC_parse--; /* function expects to be pointed at the 'x' */
{
const char* error_msg;
bool valid = grok_bslash_x(&RExC_parse,
RExC_end,
&value,
&error_msg,
TO_OUTPUT_WARNINGS(RExC_parse),
strict,
silence_non_portable,
UTF);
if (! valid) {
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(RExC_parse - 1);
}
non_portable_endpoint++;
break;
case 'c':
value = grok_bslash_c(*RExC_parse, TO_OUTPUT_WARNINGS(RExC_parse));
UPDATE_WARNINGS_LOC(RExC_parse);
RExC_parse++;
non_portable_endpoint++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7':
{
/* Take 1-3 octal digits */
I32 flags = PERL_SCAN_SILENT_ILLDIGIT;
numlen = (strict) ? 4 : 3;
value = grok_oct(--RExC_parse, &numlen, &flags, NULL);
RExC_parse += numlen;
if (numlen != 3) {
if (strict) {
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL("Need exactly 3 octal digits");
}
else if ( numlen < 3 /* like \08, \178 */
&& RExC_parse < RExC_end
&& isDIGIT(*RExC_parse)
&& ckWARN(WARN_REGEXP))
{
reg_warn_non_literal_string(
RExC_parse + 1,
form_short_octal_warning(RExC_parse, numlen));
}
}
non_portable_endpoint++;
break;
}
default:
/* Allow \_ to not give an error */
if (isWORDCHAR(value) && value != '_') {
if (strict) {
vFAIL2("Unrecognized escape \\%c in character class",
(int)value);
}
else {
ckWARN2reg(RExC_parse,
"Unrecognized escape \\%c in character class passed through",
(int)value);
}
}
break;
} /* End of switch on char following backslash */
} /* end of handling backslash escape sequences */
/* Here, we have the current token in 'value' */
if (namedclass > OOB_NAMEDCLASS) { /* this is a named class \blah */
U8 classnum;
/* a bad range like a-\d, a-[:digit:]. The '-' is taken as a
* literal, as is the character that began the false range, i.e.
* the 'a' in the examples */
if (range) {
const int w = (RExC_parse >= rangebegin)
? RExC_parse - rangebegin
: 0;
if (strict) {
vFAIL2utf8f(
"False [] range \"%" UTF8f "\"",
UTF8fARG(UTF, w, rangebegin));
}
else {
ckWARN2reg(RExC_parse,
"False [] range \"%" UTF8f "\"",
UTF8fARG(UTF, w, rangebegin));
cp_list = add_cp_to_invlist(cp_list, '-');
cp_foldable_list = add_cp_to_invlist(cp_foldable_list,
prevvalue);
}
range = 0; /* this was not a true range */
element_count += 2; /* So counts for three values */
}
classnum = namedclass_to_classnum(namedclass);
if (LOC && namedclass < ANYOF_POSIXL_MAX
#ifndef HAS_ISASCII
&& classnum != _CC_ASCII
#endif
) {
SV* scratch_list = NULL;
/* What the Posix classes (like \w, [:space:]) match isn't
* generally knowable under locale until actual match time. A
* special node is used for these which has extra space for a
* bitmap, with a bit reserved for each named class that is to
* be matched against. (This isn't needed for \p{} and
* pseudo-classes, as they are not affected by locale, and
* hence are dealt with separately.) However, if a named class
* and its complement are both present, then it matches
* everything, and there is no runtime dependency. Odd numbers
* are the complements of the next lower number, so xor works.
* (Note that something like [\w\D] should match everything,
* because \d should be a proper subset of \w. But rather than
* trust that the locale is well behaved, we leave this to
* runtime to sort out) */
if (POSIXL_TEST(posixl, namedclass ^ 1)) {
cp_list = _add_range_to_invlist(cp_list, 0, UV_MAX);
POSIXL_ZERO(posixl);
has_runtime_dependency &= ~HAS_L_RUNTIME_DEPENDENCY;
anyof_flags &= ~ANYOF_MATCHES_POSIXL;
continue; /* We could ignore the rest of the class, but
best to parse it for any errors */
}
else { /* Here, isn't the complement of any already parsed
class */
POSIXL_SET(posixl, namedclass);
has_runtime_dependency |= HAS_L_RUNTIME_DEPENDENCY;
anyof_flags |= ANYOF_MATCHES_POSIXL;
/* The above-Latin1 characters are not subject to locale
* rules. Just add them to the unconditionally-matched
* list */
/* Get the list of the above-Latin1 code points this
* matches */
_invlist_intersection_maybe_complement_2nd(PL_AboveLatin1,
PL_XPosix_ptrs[classnum],
/* Odd numbers are complements,
* like NDIGIT, NASCII, ... */
namedclass % 2 != 0,
&scratch_list);
/* Checking if 'cp_list' is NULL first saves an extra
* clone. Its reference count will be decremented at the
* next union, etc, or if this is the only instance, at the
* end of the routine */
if (! cp_list) {
cp_list = scratch_list;
}
else {
_invlist_union(cp_list, scratch_list, &cp_list);
SvREFCNT_dec_NN(scratch_list);
}
continue; /* Go get next character */
}
}
else {
/* Here, is not /l, or is a POSIX class for which /l doesn't
* matter (or is a Unicode property, which is skipped here). */
if (namedclass >= ANYOF_POSIXL_MAX) { /* If a special class */
if (namedclass != ANYOF_UNIPROP) { /* UNIPROP = \p and \P */
/* Here, should be \h, \H, \v, or \V. None of /d, /i
* nor /l make a difference in what these match,
* therefore we just add what they match to cp_list. */
if (classnum != _CC_VERTSPACE) {
assert( namedclass == ANYOF_HORIZWS
|| namedclass == ANYOF_NHORIZWS);
/* It turns out that \h is just a synonym for
* XPosixBlank */
classnum = _CC_BLANK;
}
_invlist_union_maybe_complement_2nd(
cp_list,
PL_XPosix_ptrs[classnum],
namedclass % 2 != 0, /* Complement if odd
(NHORIZWS, NVERTWS)
*/
&cp_list);
}
}
else if ( AT_LEAST_UNI_SEMANTICS
|| classnum == _CC_ASCII
|| (DEPENDS_SEMANTICS && ( classnum == _CC_DIGIT
|| classnum == _CC_XDIGIT)))
{
/* We usually have to worry about /d affecting what POSIX
* classes match, with special code needed because we won't
* know until runtime what all matches. But there is no
* extra work needed under /u and /a; and [:ascii:] is
* unaffected by /d; and :digit: and :xdigit: don't have
* runtime differences under /d. So we can special case
* these, and avoid some extra work below, and at runtime.
* */
_invlist_union_maybe_complement_2nd(
simple_posixes,
((AT_LEAST_ASCII_RESTRICTED)
? PL_Posix_ptrs[classnum]
: PL_XPosix_ptrs[classnum]),
namedclass % 2 != 0,
&simple_posixes);
}
else { /* Garden variety class. If is NUPPER, NALPHA, ...
complement and use nposixes */
SV** posixes_ptr = namedclass % 2 == 0
? &posixes
: &nposixes;
_invlist_union_maybe_complement_2nd(
*posixes_ptr,
PL_XPosix_ptrs[classnum],
namedclass % 2 != 0,
posixes_ptr);
}
}
} /* end of namedclass \blah */
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
/* If 'range' is set, 'value' is the ending of a range--check its
* validity. (If value isn't a single code point in the case of a
* range, we should have figured that out above in the code that
* catches false ranges). Later, we will handle each individual code
* point in the range. If 'range' isn't set, this could be the
* beginning of a range, so check for that by looking ahead to see if
* the next real character to be processed is the range indicator--the
* minus sign */
if (range) {
#ifdef EBCDIC
/* For unicode ranges, we have to test that the Unicode as opposed
* to the native values are not decreasing. (Above 255, there is
* no difference between native and Unicode) */
if (unicode_range && prevvalue < 255 && value < 255) {
if (NATIVE_TO_LATIN1(prevvalue) > NATIVE_TO_LATIN1(value)) {
goto backwards_range;
}
}
else
#endif
if (prevvalue > value) /* b-a */ {
int w;
#ifdef EBCDIC
backwards_range:
#endif
w = RExC_parse - rangebegin;
vFAIL2utf8f(
"Invalid [] range \"%" UTF8f "\"",
UTF8fARG(UTF, w, rangebegin));
NOT_REACHED; /* NOTREACHED */
}
}
else {
prevvalue = value; /* save the beginning of the potential range */
if (! stop_at_1 /* Can't be a range if parsing just one thing */
&& *RExC_parse == '-')
{
char* next_char_ptr = RExC_parse + 1;
/* Get the next real char after the '-' */
SKIP_BRACKETED_WHITE_SPACE(skip_white, next_char_ptr);
/* If the '-' is at the end of the class (just before the ']',
* it is a literal minus; otherwise it is a range */
if (next_char_ptr < RExC_end && *next_char_ptr != ']') {
RExC_parse = next_char_ptr;
/* a bad range like \w-, [:word:]- ? */
if (namedclass > OOB_NAMEDCLASS) {
if (strict || ckWARN(WARN_REGEXP)) {
const int w = RExC_parse >= rangebegin
? RExC_parse - rangebegin
: 0;
if (strict) {
vFAIL4("False [] range \"%*.*s\"",
w, w, rangebegin);
}
else {
vWARN4(RExC_parse,
"False [] range \"%*.*s\"",
w, w, rangebegin);
}
}
cp_list = add_cp_to_invlist(cp_list, '-');
element_count++;
} else
range = 1; /* yeah, it's a range! */
continue; /* but do it the next time */
}
}
}
if (namedclass > OOB_NAMEDCLASS) {
continue;
}
/* Here, we have a single value this time through the loop, and
* <prevvalue> is the beginning of the range, if any; or <value> if
* not. */
/* non-Latin1 code point implies unicode semantics. */
if (value > 255) {
REQUIRE_UNI_RULES(flagp, 0);
}
/* Ready to process either the single value, or the completed range.
* For single-valued non-inverted ranges, we consider the possibility
* of multi-char folds. (We made a conscious decision to not do this
* for the other cases because it can often lead to non-intuitive
* results. For example, you have the peculiar case that:
* "s s" =~ /^[^\xDF]+$/i => Y
* "ss" =~ /^[^\xDF]+$/i => N
*
* See [perl #89750] */
if (FOLD && allow_mutiple_chars && value == prevvalue) {
if ( value == LATIN_SMALL_LETTER_SHARP_S
|| (value > 255 && _invlist_contains_cp(PL_HasMultiCharFold,
value)))
{
/* Here <value> is indeed a multi-char fold. Get what it is */
U8 foldbuf[UTF8_MAXBYTES_CASE+1];
STRLEN foldlen;
UV folded = _to_uni_fold_flags(
value,
foldbuf,
&foldlen,
FOLD_FLAGS_FULL | (ASCII_FOLD_RESTRICTED
? FOLD_FLAGS_NOMIX_ASCII
: 0)
);
/* Here, <folded> should be the first character of the
* multi-char fold of <value>, with <foldbuf> containing the
* whole thing. But, if this fold is not allowed (because of
* the flags), <fold> will be the same as <value>, and should
* be processed like any other character, so skip the special
* handling */
if (folded != value) {
/* Skip if we are recursed, currently parsing the class
* again. Otherwise add this character to the list of
* multi-char folds. */
if (! RExC_in_multi_char_class) {
STRLEN cp_count = utf8_length(foldbuf,
foldbuf + foldlen);
SV* multi_fold = sv_2mortal(newSVpvs(""));
Perl_sv_catpvf(aTHX_ multi_fold, "\\x{%" UVXf "}", value);
multi_char_matches
= add_multi_match(multi_char_matches,
multi_fold,
cp_count);
}
/* This element should not be processed further in this
* class */
element_count--;
value = save_value;
prevvalue = save_prevvalue;
continue;
}
}
}
if (strict && ckWARN(WARN_REGEXP)) {
if (range) {
/* If the range starts above 255, everything is portable and
* likely to be so for any forseeable character set, so don't
* warn. */
if (unicode_range && non_portable_endpoint && prevvalue < 256) {
vWARN(RExC_parse, "Both or neither range ends should be Unicode");
}
else if (prevvalue != value) {
/* Under strict, ranges that stop and/or end in an ASCII
* printable should have each end point be a portable value
* for it (preferably like 'A', but we don't warn if it is
* a (portable) Unicode name or code point), and the range
* must be be all digits or all letters of the same case.
* Otherwise, the range is non-portable and unclear as to
* what it contains */
if ( (isPRINT_A(prevvalue) || isPRINT_A(value))
&& ( non_portable_endpoint
|| ! ( (isDIGIT_A(prevvalue) && isDIGIT_A(value))
|| (isLOWER_A(prevvalue) && isLOWER_A(value))
|| (isUPPER_A(prevvalue) && isUPPER_A(value))
))) {
vWARN(RExC_parse, "Ranges of ASCII printables should"
" be some subset of \"0-9\","
" \"A-Z\", or \"a-z\"");
}
else if (prevvalue >= FIRST_NON_ASCII_DECIMAL_DIGIT) {
SSize_t index_start;
SSize_t index_final;
/* But the nature of Unicode and languages mean we
* can't do the same checks for above-ASCII ranges,
* except in the case of digit ones. These should
* contain only digits from the same group of 10. The
* ASCII case is handled just above. Hence here, the
* range could be a range of digits. First some
* unlikely special cases. Grandfather in that a range
* ending in 19DA (NEW TAI LUE THAM DIGIT ONE) is bad
* if its starting value is one of the 10 digits prior
* to it. This is because it is an alternate way of
* writing 19D1, and some people may expect it to be in
* that group. But it is bad, because it won't give
* the expected results. In Unicode 5.2 it was
* considered to be in that group (of 11, hence), but
* this was fixed in the next version */
if (UNLIKELY(value == 0x19DA && prevvalue >= 0x19D0)) {
goto warn_bad_digit_range;
}
else if (UNLIKELY( prevvalue >= 0x1D7CE
&& value <= 0x1D7FF))
{
/* This is the only other case currently in Unicode
* where the algorithm below fails. The code
* points just above are the end points of a single
* range containing only decimal digits. It is 5
* different series of 0-9. All other ranges of
* digits currently in Unicode are just a single
* series. (And mktables will notify us if a later
* Unicode version breaks this.)
*
* If the range being checked is at most 9 long,
* and the digit values represented are in
* numerical order, they are from the same series.
* */
if ( value - prevvalue > 9
|| ((( value - 0x1D7CE) % 10)
<= (prevvalue - 0x1D7CE) % 10))
{
goto warn_bad_digit_range;
}
}
else {
/* For all other ranges of digits in Unicode, the
* algorithm is just to check if both end points
* are in the same series, which is the same range.
* */
index_start = _invlist_search(
PL_XPosix_ptrs[_CC_DIGIT],
prevvalue);
/* Warn if the range starts and ends with a digit,
* and they are not in the same group of 10. */
if ( index_start >= 0
&& ELEMENT_RANGE_MATCHES_INVLIST(index_start)
&& (index_final =
_invlist_search(PL_XPosix_ptrs[_CC_DIGIT],
value)) != index_start
&& index_final >= 0
&& ELEMENT_RANGE_MATCHES_INVLIST(index_final))
{
warn_bad_digit_range:
vWARN(RExC_parse, "Ranges of digits should be"
" from the same group of"
" 10");
}
}
}
}
}
if ((! range || prevvalue == value) && non_portable_endpoint) {
if (isPRINT_A(value)) {
char literal[3];
unsigned d = 0;
if (isBACKSLASHED_PUNCT(value)) {
literal[d++] = '\\';
}
literal[d++] = (char) value;
literal[d++] = '\0';
vWARN4(RExC_parse,
"\"%.*s\" is more clearly written simply as \"%s\"",
(int) (RExC_parse - rangebegin),
rangebegin,
literal
);
}
else if isMNEMONIC_CNTRL(value) {
vWARN4(RExC_parse,
"\"%.*s\" is more clearly written simply as \"%s\"",
(int) (RExC_parse - rangebegin),
rangebegin,
cntrl_to_mnemonic((U8) value)
);
}
}
}
/* Deal with this element of the class */
#ifndef EBCDIC
cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
prevvalue, value);
#else
/* On non-ASCII platforms, for ranges that span all of 0..255, and ones
* that don't require special handling, we can just add the range like
* we do for ASCII platforms */
if ((UNLIKELY(prevvalue == 0) && value >= 255)
|| ! (prevvalue < 256
&& (unicode_range
|| (! non_portable_endpoint
&& ((isLOWER_A(prevvalue) && isLOWER_A(value))
|| (isUPPER_A(prevvalue)
&& isUPPER_A(value)))))))
{
cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
prevvalue, value);
}
else {
/* Here, requires special handling. This can be because it is a
* range whose code points are considered to be Unicode, and so
* must be individually translated into native, or because its a
* subrange of 'A-Z' or 'a-z' which each aren't contiguous in
* EBCDIC, but we have defined them to include only the "expected"
* upper or lower case ASCII alphabetics. Subranges above 255 are
* the same in native and Unicode, so can be added as a range */
U8 start = NATIVE_TO_LATIN1(prevvalue);
unsigned j;
U8 end = (value < 256) ? NATIVE_TO_LATIN1(value) : 255;
for (j = start; j <= end; j++) {
cp_foldable_list = add_cp_to_invlist(cp_foldable_list, LATIN1_TO_NATIVE(j));
}
if (value > 255) {
cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
256, value);
}
}
#endif
range = 0; /* this range (if it was one) is done now */
} /* End of loop through all the text within the brackets */
if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0) {
output_posix_warnings(pRExC_state, posix_warnings);
}
/* If anything in the class expands to more than one character, we have to
* deal with them by building up a substitute parse string, and recursively
* calling reg() on it, instead of proceeding */
if (multi_char_matches) {
SV * substitute_parse = newSVpvn_flags("?:", 2, SVs_TEMP);
I32 cp_count;
STRLEN len;
char *save_end = RExC_end;
char *save_parse = RExC_parse;
char *save_start = RExC_start;
Size_t constructed_prefix_len = 0; /* This gives the length of the
constructed portion of the
substitute parse. */
bool first_time = TRUE; /* First multi-char occurrence doesn't get
a "|" */
I32 reg_flags;
assert(! invert);
/* Only one level of recursion allowed */
assert(RExC_copy_start_in_constructed == RExC_precomp);
#if 0 /* Have decided not to deal with multi-char folds in inverted classes,
because too confusing */
if (invert) {
sv_catpvs(substitute_parse, "(?:");
}
#endif
/* Look at the longest folds first */
for (cp_count = av_tindex_skip_len_mg(multi_char_matches);
cp_count > 0;
cp_count--)
{
if (av_exists(multi_char_matches, cp_count)) {
AV** this_array_ptr;
SV* this_sequence;
this_array_ptr = (AV**) av_fetch(multi_char_matches,
cp_count, FALSE);
while ((this_sequence = av_pop(*this_array_ptr)) !=
&PL_sv_undef)
{
if (! first_time) {
sv_catpvs(substitute_parse, "|");
}
first_time = FALSE;
sv_catpv(substitute_parse, SvPVX(this_sequence));
}
}
}
/* If the character class contains anything else besides these
* multi-character folds, have to include it in recursive parsing */
if (element_count) {
sv_catpvs(substitute_parse, "|[");
constructed_prefix_len = SvCUR(substitute_parse);
sv_catpvn(substitute_parse, orig_parse, RExC_parse - orig_parse);
/* Put in a closing ']' only if not going off the end, as otherwise
* we are adding something that really isn't there */
if (RExC_parse < RExC_end) {
sv_catpvs(substitute_parse, "]");
}
}
sv_catpvs(substitute_parse, ")");
#if 0
if (invert) {
/* This is a way to get the parse to skip forward a whole named
* sequence instead of matching the 2nd character when it fails the
* first */
sv_catpvs(substitute_parse, "(*THEN)(*SKIP)(*FAIL)|.)");
}
#endif
/* Set up the data structure so that any errors will be properly
* reported. See the comments at the definition of
* REPORT_LOCATION_ARGS for details */
RExC_copy_start_in_input = (char *) orig_parse;
RExC_start = RExC_parse = SvPV(substitute_parse, len);
RExC_copy_start_in_constructed = RExC_start + constructed_prefix_len;
RExC_end = RExC_parse + len;
RExC_in_multi_char_class = 1;
ret = reg(pRExC_state, 1, ®_flags, depth+1);
*flagp |= reg_flags & (HASWIDTH|SIMPLE|SPSTART|POSTPONED|RESTART_PARSE|NEED_UTF8);
/* And restore so can parse the rest of the pattern */
RExC_parse = save_parse;
RExC_start = RExC_copy_start_in_constructed = RExC_copy_start_in_input = save_start;
RExC_end = save_end;
RExC_in_multi_char_class = 0;
SvREFCNT_dec_NN(multi_char_matches);
return ret;
}
/* If folding, we calculate all characters that could fold to or from the
* ones already on the list */
if (cp_foldable_list) {
if (FOLD) {
UV start, end; /* End points of code point ranges */
SV* fold_intersection = NULL;
SV** use_list;
/* Our calculated list will be for Unicode rules. For locale
* matching, we have to keep a separate list that is consulted at
* runtime only when the locale indicates Unicode rules (and we
* don't include potential matches in the ASCII/Latin1 range, as
* any code point could fold to any other, based on the run-time
* locale). For non-locale, we just use the general list */
if (LOC) {
use_list = &only_utf8_locale_list;
}
else {
use_list = &cp_list;
}
/* Only the characters in this class that participate in folds need
* be checked. Get the intersection of this class and all the
* possible characters that are foldable. This can quickly narrow
* down a large class */
_invlist_intersection(PL_in_some_fold, cp_foldable_list,
&fold_intersection);
/* Now look at the foldable characters in this class individually */
invlist_iterinit(fold_intersection);
while (invlist_iternext(fold_intersection, &start, &end)) {
UV j;
UV folded;
/* Look at every character in the range */
for (j = start; j <= end; j++) {
U8 foldbuf[UTF8_MAXBYTES_CASE+1];
STRLEN foldlen;
unsigned int k;
Size_t folds_count;
unsigned int first_fold;
const unsigned int * remaining_folds;
if (j < 256) {
/* Under /l, we don't know what code points below 256
* fold to, except we do know the MICRO SIGN folds to
* an above-255 character if the locale is UTF-8, so we
* add it to the special list (in *use_list) Otherwise
* we know now what things can match, though some folds
* are valid under /d only if the target is UTF-8.
* Those go in a separate list */
if ( IS_IN_SOME_FOLD_L1(j)
&& ! (LOC && j != MICRO_SIGN))
{
/* ASCII is always matched; non-ASCII is matched
* only under Unicode rules (which could happen
* under /l if the locale is a UTF-8 one */
if (isASCII(j) || ! DEPENDS_SEMANTICS) {
*use_list = add_cp_to_invlist(*use_list,
PL_fold_latin1[j]);
}
else if (j != PL_fold_latin1[j]) {
upper_latin1_only_utf8_matches
= add_cp_to_invlist(
upper_latin1_only_utf8_matches,
PL_fold_latin1[j]);
}
}
if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(j)
&& (! isASCII(j) || ! ASCII_FOLD_RESTRICTED))
{
add_above_Latin1_folds(pRExC_state,
(U8) j,
use_list);
}
continue;
}
/* Here is an above Latin1 character. We don't have the
* rules hard-coded for it. First, get its fold. This is
* the simple fold, as the multi-character folds have been
* handled earlier and separated out */
folded = _to_uni_fold_flags(j, foldbuf, &foldlen,
(ASCII_FOLD_RESTRICTED)
? FOLD_FLAGS_NOMIX_ASCII
: 0);
/* Single character fold of above Latin1. Add everything
* in its fold closure to the list that this node should
* match. */
folds_count = _inverse_folds(folded, &first_fold,
&remaining_folds);
for (k = 0; k <= folds_count; k++) {
UV c = (k == 0) /* First time through use itself */
? folded
: (k == 1) /* 2nd time use, the first fold */
? first_fold
/* Then the remaining ones */
: remaining_folds[k-2];
/* /aa doesn't allow folds between ASCII and non- */
if (( ASCII_FOLD_RESTRICTED
&& (isASCII(c) != isASCII(j))))
{
continue;
}
/* Folds under /l which cross the 255/256 boundary are
* added to a separate list. (These are valid only
* when the locale is UTF-8.) */
if (c < 256 && LOC) {
*use_list = add_cp_to_invlist(*use_list, c);
continue;
}
if (isASCII(c) || c > 255 || AT_LEAST_UNI_SEMANTICS)
{
cp_list = add_cp_to_invlist(cp_list, c);
}
else {
/* Similarly folds involving non-ascii Latin1
* characters under /d are added to their list */
upper_latin1_only_utf8_matches
= add_cp_to_invlist(
upper_latin1_only_utf8_matches,
c);
}
}
}
}
SvREFCNT_dec_NN(fold_intersection);
}
/* Now that we have finished adding all the folds, there is no reason
* to keep the foldable list separate */
_invlist_union(cp_list, cp_foldable_list, &cp_list);
SvREFCNT_dec_NN(cp_foldable_list);
}
/* And combine the result (if any) with any inversion lists from posix
* classes. The lists are kept separate up to now because we don't want to
* fold the classes */
if (simple_posixes) { /* These are the classes known to be unaffected by
/a, /aa, and /d */
if (cp_list) {
_invlist_union(cp_list, simple_posixes, &cp_list);
SvREFCNT_dec_NN(simple_posixes);
}
else {
cp_list = simple_posixes;
}
}
if (posixes || nposixes) {
if (! DEPENDS_SEMANTICS) {
/* For everything but /d, we can just add the current 'posixes' and
* 'nposixes' to the main list */
if (posixes) {
if (cp_list) {
_invlist_union(cp_list, posixes, &cp_list);
SvREFCNT_dec_NN(posixes);
}
else {
cp_list = posixes;
}
}
if (nposixes) {
if (cp_list) {
_invlist_union(cp_list, nposixes, &cp_list);
SvREFCNT_dec_NN(nposixes);
}
else {
cp_list = nposixes;
}
}
}
else {
/* Under /d, things like \w match upper Latin1 characters only if
* the target string is in UTF-8. But things like \W match all the
* upper Latin1 characters if the target string is not in UTF-8.
*
* Handle the case with something like \W separately */
if (nposixes) {
SV* only_non_utf8_list = invlist_clone(PL_UpperLatin1, NULL);
/* A complemented posix class matches all upper Latin1
* characters if not in UTF-8. And it matches just certain
* ones when in UTF-8. That means those certain ones are
* matched regardless, so can just be added to the
* unconditional list */
if (cp_list) {
_invlist_union(cp_list, nposixes, &cp_list);
SvREFCNT_dec_NN(nposixes);
nposixes = NULL;
}
else {
cp_list = nposixes;
}
/* Likewise for 'posixes' */
_invlist_union(posixes, cp_list, &cp_list);
SvREFCNT_dec(posixes);
/* Likewise for anything else in the range that matched only
* under UTF-8 */
if (upper_latin1_only_utf8_matches) {
_invlist_union(cp_list,
upper_latin1_only_utf8_matches,
&cp_list);
SvREFCNT_dec_NN(upper_latin1_only_utf8_matches);
upper_latin1_only_utf8_matches = NULL;
}
/* If we don't match all the upper Latin1 characters regardless
* of UTF-8ness, we have to set a flag to match the rest when
* not in UTF-8 */
_invlist_subtract(only_non_utf8_list, cp_list,
&only_non_utf8_list);
if (_invlist_len(only_non_utf8_list) != 0) {
anyof_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
}
SvREFCNT_dec_NN(only_non_utf8_list);
}
else {
/* Here there were no complemented posix classes. That means
* the upper Latin1 characters in 'posixes' match only when the
* target string is in UTF-8. So we have to add them to the
* list of those types of code points, while adding the
* remainder to the unconditional list.
*
* First calculate what they are */
SV* nonascii_but_latin1_properties = NULL;
_invlist_intersection(posixes, PL_UpperLatin1,
&nonascii_but_latin1_properties);
/* And add them to the final list of such characters. */
_invlist_union(upper_latin1_only_utf8_matches,
nonascii_but_latin1_properties,
&upper_latin1_only_utf8_matches);
/* Remove them from what now becomes the unconditional list */
_invlist_subtract(posixes, nonascii_but_latin1_properties,
&posixes);
/* And add those unconditional ones to the final list */
if (cp_list) {
_invlist_union(cp_list, posixes, &cp_list);
SvREFCNT_dec_NN(posixes);
posixes = NULL;
}
else {
cp_list = posixes;
}
SvREFCNT_dec(nonascii_but_latin1_properties);
/* Get rid of any characters from the conditional list that we
* now know are matched unconditionally, which may make that
* list empty */
_invlist_subtract(upper_latin1_only_utf8_matches,
cp_list,
&upper_latin1_only_utf8_matches);
if (_invlist_len(upper_latin1_only_utf8_matches) == 0) {
SvREFCNT_dec_NN(upper_latin1_only_utf8_matches);
upper_latin1_only_utf8_matches = NULL;
}
}
}
}
/* And combine the result (if any) with any inversion list from properties.
* The lists are kept separate up to now so that we can distinguish the two
* in regards to matching above-Unicode. A run-time warning is generated
* if a Unicode property is matched against a non-Unicode code point. But,
* we allow user-defined properties to match anything, without any warning,
* and we also suppress the warning if there is a portion of the character
* class that isn't a Unicode property, and which matches above Unicode, \W
* or [\x{110000}] for example.
* (Note that in this case, unlike the Posix one above, there is no
* <upper_latin1_only_utf8_matches>, because having a Unicode property
* forces Unicode semantics */
if (properties) {
if (cp_list) {
/* If it matters to the final outcome, see if a non-property
* component of the class matches above Unicode. If so, the
* warning gets suppressed. This is true even if just a single
* such code point is specified, as, though not strictly correct if
* another such code point is matched against, the fact that they
* are using above-Unicode code points indicates they should know
* the issues involved */
if (warn_super) {
warn_super = ! (invert
^ (invlist_highest(cp_list) > PERL_UNICODE_MAX));
}
_invlist_union(properties, cp_list, &cp_list);
SvREFCNT_dec_NN(properties);
}
else {
cp_list = properties;
}
if (warn_super) {
anyof_flags
|= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
/* Because an ANYOF node is the only one that warns, this node
* can't be optimized into something else */
optimizable = FALSE;
}
}
/* Here, we have calculated what code points should be in the character
* class.
*
* Now we can see about various optimizations. Fold calculation (which we
* did above) needs to take place before inversion. Otherwise /[^k]/i
* would invert to include K, which under /i would match k, which it
* shouldn't. Therefore we can't invert folded locale now, as it won't be
* folded until runtime */
/* If we didn't do folding, it's because some information isn't available
* until runtime; set the run-time fold flag for these We know to set the
* flag if we have a non-NULL list for UTF-8 locales, or the class matches
* at least one 0-255 range code point */
if (LOC && FOLD) {
/* Some things on the list might be unconditionally included because of
* other components. Remove them, and clean up the list if it goes to
* 0 elements */
if (only_utf8_locale_list && cp_list) {
_invlist_subtract(only_utf8_locale_list, cp_list,
&only_utf8_locale_list);
if (_invlist_len(only_utf8_locale_list) == 0) {
SvREFCNT_dec_NN(only_utf8_locale_list);
only_utf8_locale_list = NULL;
}
}
if ( only_utf8_locale_list
|| (cp_list && ( _invlist_contains_cp(cp_list, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE)
|| _invlist_contains_cp(cp_list, LATIN_SMALL_LETTER_DOTLESS_I))))
{
has_runtime_dependency |= HAS_L_RUNTIME_DEPENDENCY;
anyof_flags
|= ANYOFL_FOLD
| ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
else if (cp_list) { /* Look to see if a 0-255 code point is in list */
UV start, end;
invlist_iterinit(cp_list);
if (invlist_iternext(cp_list, &start, &end) && start < 256) {
anyof_flags |= ANYOFL_FOLD;
has_runtime_dependency |= HAS_L_RUNTIME_DEPENDENCY;
}
invlist_iterfinish(cp_list);
}
}
else if ( DEPENDS_SEMANTICS
&& ( upper_latin1_only_utf8_matches
|| (anyof_flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER)))
{
RExC_seen_d_op = TRUE;
has_runtime_dependency |= HAS_D_RUNTIME_DEPENDENCY;
}
/* Optimize inverted patterns (e.g. [^a-z]) when everything is known at
* compile time. */
if ( cp_list
&& invert
&& ! has_runtime_dependency)
{
_invlist_invert(cp_list);
/* Clear the invert flag since have just done it here */
invert = FALSE;
}
if (ret_invlist) {
*ret_invlist = cp_list;
return RExC_emit;
}
/* All possible optimizations below still have these characteristics.
* (Multi-char folds aren't SIMPLE, but they don't get this far in this
* routine) */
*flagp |= HASWIDTH|SIMPLE;
if (anyof_flags & ANYOF_LOCALE_FLAGS) {
RExC_contains_locale = 1;
}
/* Some character classes are equivalent to other nodes. Such nodes take
* up less room, and some nodes require fewer operations to execute, than
* ANYOF nodes. EXACTish nodes may be joinable with adjacent nodes to
* improve efficiency. */
if (optimizable) {
PERL_UINT_FAST8_T i;
Size_t partial_cp_count = 0;
UV start[MAX_FOLD_FROMS+1] = { 0 }; /* +1 for the folded-to char */
UV end[MAX_FOLD_FROMS+1] = { 0 };
if (cp_list) { /* Count the code points in enough ranges that we would
see all the ones possible in any fold in this version
of Unicode */
invlist_iterinit(cp_list);
for (i = 0; i <= MAX_FOLD_FROMS; i++) {
if (! invlist_iternext(cp_list, &start[i], &end[i])) {
break;
}
partial_cp_count += end[i] - start[i] + 1;
}
invlist_iterfinish(cp_list);
}
/* If we know at compile time that this matches every possible code
* point, any run-time dependencies don't matter */
if (start[0] == 0 && end[0] == UV_MAX) {
if (invert) {
ret = reganode(pRExC_state, OPFAIL, 0);
}
else {
ret = reg_node(pRExC_state, SANY);
MARK_NAUGHTY(1);
}
goto not_anyof;
}
/* Similarly, for /l posix classes, if both a class and its
* complement match, any run-time dependencies don't matter */
if (posixl) {
for (namedclass = 0; namedclass < ANYOF_POSIXL_MAX;
namedclass += 2)
{
if ( POSIXL_TEST(posixl, namedclass) /* class */
&& POSIXL_TEST(posixl, namedclass + 1)) /* its complement */
{
if (invert) {
ret = reganode(pRExC_state, OPFAIL, 0);
}
else {
ret = reg_node(pRExC_state, SANY);
MARK_NAUGHTY(1);
}
goto not_anyof;
}
}
/* For well-behaved locales, some classes are subsets of others,
* so complementing the subset and including the non-complemented
* superset should match everything, like [\D[:alnum:]], and
* [[:^alpha:][:alnum:]], but some implementations of locales are
* buggy, and khw thinks its a bad idea to have optimization change
* behavior, even if it avoids an OS bug in a given case */
#define isSINGLE_BIT_SET(n) isPOWER_OF_2(n)
/* If is a single posix /l class, can optimize to just that op.
* Such a node will not match anything in the Latin1 range, as that
* is not determinable until runtime, but will match whatever the
* class does outside that range. (Note that some classes won't
* match anything outside the range, like [:ascii:]) */
if ( isSINGLE_BIT_SET(posixl)
&& (partial_cp_count == 0 || start[0] > 255))
{
U8 classnum;
SV * class_above_latin1 = NULL;
bool already_inverted;
bool are_equivalent;
/* Compute which bit is set, which is the same thing as, e.g.,
* ANYOF_CNTRL. From
* https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn
* */
static const int MultiplyDeBruijnBitPosition2[32] =
{
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
namedclass = MultiplyDeBruijnBitPosition2[(posixl
* 0x077CB531U) >> 27];
classnum = namedclass_to_classnum(namedclass);
/* The named classes are such that the inverted number is one
* larger than the non-inverted one */
already_inverted = namedclass
- classnum_to_namedclass(classnum);
/* Create an inversion list of the official property, inverted
* if the constructed node list is inverted, and restricted to
* only the above latin1 code points, which are the only ones
* known at compile time */
_invlist_intersection_maybe_complement_2nd(
PL_AboveLatin1,
PL_XPosix_ptrs[classnum],
already_inverted,
&class_above_latin1);
are_equivalent = _invlistEQ(class_above_latin1, cp_list,
FALSE);
SvREFCNT_dec_NN(class_above_latin1);
if (are_equivalent) {
/* Resolve the run-time inversion flag with this possibly
* inverted class */
invert = invert ^ already_inverted;
ret = reg_node(pRExC_state,
POSIXL + invert * (NPOSIXL - POSIXL));
FLAGS(REGNODE_p(ret)) = classnum;
goto not_anyof;
}
}
}
/* khw can't think of any other possible transformation involving
* these. */
if (has_runtime_dependency & HAS_USER_DEFINED_PROPERTY) {
goto is_anyof;
}
if (! has_runtime_dependency) {
/* If the list is empty, nothing matches. This happens, for
* example, when a Unicode property that doesn't match anything is
* the only element in the character class (perluniprops.pod notes
* such properties). */
if (partial_cp_count == 0) {
if (invert) {
ret = reg_node(pRExC_state, SANY);
}
else {
ret = reganode(pRExC_state, OPFAIL, 0);
}
goto not_anyof;
}
/* If matches everything but \n */
if ( start[0] == 0 && end[0] == '\n' - 1
&& start[1] == '\n' + 1 && end[1] == UV_MAX)
{
assert (! invert);
ret = reg_node(pRExC_state, REG_ANY);
MARK_NAUGHTY(1);
goto not_anyof;
}
}
/* Next see if can optimize classes that contain just a few code points
* into an EXACTish node. The reason to do this is to let the
* optimizer join this node with adjacent EXACTish ones.
*
* An EXACTFish node can be generated even if not under /i, and vice
* versa. But care must be taken. An EXACTFish node has to be such
* that it only matches precisely the code points in the class, but we
* want to generate the least restrictive one that does that, to
* increase the odds of being able to join with an adjacent node. For
* example, if the class contains [kK], we have to make it an EXACTFAA
* node to prevent the KELVIN SIGN from matching. Whether we are under
* /i or not is irrelevant in this case. Less obvious is the pattern
* qr/[\x{02BC}]n/i. U+02BC is MODIFIER LETTER APOSTROPHE. That is
* supposed to match the single character U+0149 LATIN SMALL LETTER N
* PRECEDED BY APOSTROPHE. And so even though there is no simple fold
* that includes \X{02BC}, there is a multi-char fold that does, and so
* the node generated for it must be an EXACTFish one. On the other
* hand qr/:/i should generate a plain EXACT node since the colon
* participates in no fold whatsoever, and having it EXACT tells the
* optimizer the target string cannot match unless it has a colon in
* it.
*
* We don't typically generate an EXACTish node if doing so would
* require changing the pattern to UTF-8, as that affects /d and
* otherwise is slower. However, under /i, not changing to UTF-8 can
* miss some potential multi-character folds. We calculate the
* EXACTish node, and then decide if something would be missed if we
* don't upgrade */
if ( ! posixl
&& ! invert
/* Only try if there are no more code points in the class than
* in the max possible fold */
&& partial_cp_count > 0 && partial_cp_count <= MAX_FOLD_FROMS + 1
&& (start[0] < 256 || UTF || FOLD))
{
if (partial_cp_count == 1 && ! upper_latin1_only_utf8_matches)
{
/* We can always make a single code point class into an
* EXACTish node. */
if (LOC) {
/* Here is /l: Use EXACTL, except /li indicates EXACTFL,
* as that means there is a fold not known until runtime so
* shows as only a single code point here. */
op = (FOLD) ? EXACTFL : EXACTL;
}
else if (! FOLD) { /* Not /l and not /i */
op = (start[0] < 256) ? EXACT : EXACT_ONLY8;
}
else if (start[0] < 256) { /* /i, not /l, and the code point is
small */
/* Under /i, it gets a little tricky. A code point that
* doesn't participate in a fold should be an EXACT node.
* We know this one isn't the result of a simple fold, or
* there'd be more than one code point in the list, but it
* could be part of a multi- character fold. In that case
* we better not create an EXACT node, as we would wrongly
* be telling the optimizer that this code point must be in
* the target string, and that is wrong. This is because
* if the sequence around this code point forms a
* multi-char fold, what needs to be in the string could be
* the code point that folds to the sequence.
*
* This handles the case of below-255 code points, as we
* have an easy look up for those. The next clause handles
* the above-256 one */
op = IS_IN_SOME_FOLD_L1(start[0])
? EXACTFU
: EXACT;
}
else { /* /i, larger code point. Since we are under /i, and
have just this code point, we know that it can't
fold to something else, so PL_InMultiCharFold
applies to it */
op = _invlist_contains_cp(PL_InMultiCharFold,
start[0])
? EXACTFU_ONLY8
: EXACT_ONLY8;
}
value = start[0];
}
else if ( ! (has_runtime_dependency & ~HAS_D_RUNTIME_DEPENDENCY)
&& _invlist_contains_cp(PL_in_some_fold, start[0]))
{
/* Here, the only runtime dependency, if any, is from /d, and
* the class matches more than one code point, and the lowest
* code point participates in some fold. It might be that the
* other code points are /i equivalent to this one, and hence
* they would representable by an EXACTFish node. Above, we
* eliminated classes that contain too many code points to be
* EXACTFish, with the test for MAX_FOLD_FROMS
*
* First, special case the ASCII fold pairs, like 'B' and 'b'.
* We do this because we have EXACTFAA at our disposal for the
* ASCII range */
if (partial_cp_count == 2 && isASCII(start[0])) {
/* The only ASCII characters that participate in folds are
* alphabetics */
assert(isALPHA(start[0]));
if ( end[0] == start[0] /* First range is a single
character, so 2nd exists */
&& isALPHA_FOLD_EQ(start[0], start[1]))
{
/* Here, is part of an ASCII fold pair */
if ( ASCII_FOLD_RESTRICTED
|| HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(start[0]))
{
/* If the second clause just above was true, it
* means we can't be under /i, or else the list
* would have included more than this fold pair.
* Therefore we have to exclude the possibility of
* whatever else it is that folds to these, by
* using EXACTFAA */
op = EXACTFAA;
}
else if (HAS_NONLATIN1_FOLD_CLOSURE(start[0])) {
/* Here, there's no simple fold that start[0] is part
* of, but there is a multi-character one. If we
* are not under /i, we want to exclude that
* possibility; if under /i, we want to include it
* */
op = (FOLD) ? EXACTFU : EXACTFAA;
}
else {
/* Here, the only possible fold start[0] particpates in
* is with start[1]. /i or not isn't relevant */
op = EXACTFU;
}
value = toFOLD(start[0]);
}
}
else if ( ! upper_latin1_only_utf8_matches
|| ( _invlist_len(upper_latin1_only_utf8_matches)
== 2
&& PL_fold_latin1[
invlist_highest(upper_latin1_only_utf8_matches)]
== start[0]))
{
/* Here, the smallest character is non-ascii or there are
* more than 2 code points matched by this node. Also, we
* either don't have /d UTF-8 dependent matches, or if we
* do, they look like they could be a single character that
* is the fold of the lowest one in the always-match list.
* This test quickly excludes most of the false positives
* when there are /d UTF-8 depdendent matches. These are
* like LATIN CAPITAL LETTER A WITH GRAVE matching LATIN
* SMALL LETTER A WITH GRAVE iff the target string is
* UTF-8. (We don't have to worry above about exceeding
* the array bounds of PL_fold_latin1[] because any code
* point in 'upper_latin1_only_utf8_matches' is below 256.)
*
* EXACTFAA would apply only to pairs (hence exactly 2 code
* points) in the ASCII range, so we can't use it here to
* artificially restrict the fold domain, so we check if
* the class does or does not match some EXACTFish node.
* Further, if we aren't under /i, and and the folded-to
* character is part of a multi-character fold, we can't do
* this optimization, as the sequence around it could be
* that multi-character fold, and we don't here know the
* context, so we have to assume it is that multi-char
* fold, to prevent potential bugs.
*
* To do the general case, we first find the fold of the
* lowest code point (which may be higher than the lowest
* one), then find everything that folds to it. (The data
* structure we have only maps from the folded code points,
* so we have to do the earlier step.) */
Size_t foldlen;
U8 foldbuf[UTF8_MAXBYTES_CASE];
UV folded = _to_uni_fold_flags(start[0],
foldbuf, &foldlen, 0);
unsigned int first_fold;
const unsigned int * remaining_folds;
Size_t folds_to_this_cp_count = _inverse_folds(
folded,
&first_fold,
&remaining_folds);
Size_t folds_count = folds_to_this_cp_count + 1;
SV * fold_list = _new_invlist(folds_count);
unsigned int i;
/* If there are UTF-8 dependent matches, create a temporary
* list of what this node matches, including them. */
SV * all_cp_list = NULL;
SV ** use_this_list = &cp_list;
if (upper_latin1_only_utf8_matches) {
all_cp_list = _new_invlist(0);
use_this_list = &all_cp_list;
_invlist_union(cp_list,
upper_latin1_only_utf8_matches,
use_this_list);
}
/* Having gotten everything that participates in the fold
* containing the lowest code point, we turn that into an
* inversion list, making sure everything is included. */
fold_list = add_cp_to_invlist(fold_list, start[0]);
fold_list = add_cp_to_invlist(fold_list, folded);
if (folds_to_this_cp_count > 0) {
fold_list = add_cp_to_invlist(fold_list, first_fold);
for (i = 0; i + 1 < folds_to_this_cp_count; i++) {
fold_list = add_cp_to_invlist(fold_list,
remaining_folds[i]);
}
}
/* If the fold list is identical to what's in this ANYOF
* node, the node can be represented by an EXACTFish one
* instead */
if (_invlistEQ(*use_this_list, fold_list,
0 /* Don't complement */ )
) {
/* But, we have to be careful, as mentioned above.
* Just the right sequence of characters could match
* this if it is part of a multi-character fold. That
* IS what we want if we are under /i. But it ISN'T
* what we want if not under /i, as it could match when
* it shouldn't. So, when we aren't under /i and this
* character participates in a multi-char fold, we
* don't optimize into an EXACTFish node. So, for each
* case below we have to check if we are folding
* and if not, if it is not part of a multi-char fold.
* */
if (start[0] > 255) { /* Highish code point */
if (FOLD || ! _invlist_contains_cp(
PL_InMultiCharFold, folded))
{
op = (LOC)
? EXACTFLU8
: (ASCII_FOLD_RESTRICTED)
? EXACTFAA
: EXACTFU_ONLY8;
value = folded;
}
} /* Below, the lowest code point < 256 */
else if ( FOLD
&& folded == 's'
&& DEPENDS_SEMANTICS)
{ /* An EXACTF node containing a single character
's', can be an EXACTFU if it doesn't get
joined with an adjacent 's' */
op = EXACTFU_S_EDGE;
value = folded;
}
else if ( FOLD
|| ! HAS_NONLATIN1_FOLD_CLOSURE(start[0]))
{
if (upper_latin1_only_utf8_matches) {
op = EXACTF;
/* We can't use the fold, as that only matches
* under UTF-8 */
value = start[0];
}
else if ( UNLIKELY(start[0] == MICRO_SIGN)
&& ! UTF)
{ /* EXACTFUP is a special node for this
character */
op = (ASCII_FOLD_RESTRICTED)
? EXACTFAA
: EXACTFUP;
value = MICRO_SIGN;
}
else if ( ASCII_FOLD_RESTRICTED
&& ! isASCII(start[0]))
{ /* For ASCII under /iaa, we can use EXACTFU
below */
op = EXACTFAA;
value = folded;
}
else {
op = EXACTFU;
value = folded;
}
}
}
SvREFCNT_dec_NN(fold_list);
SvREFCNT_dec(all_cp_list);
}
}
if (op != END) {
/* Here, we have calculated what EXACTish node we would use.
* But we don't use it if it would require converting the
* pattern to UTF-8, unless not using it could cause us to miss
* some folds (hence be buggy) */
if (! UTF && value > 255) {
SV * in_multis = NULL;
assert(FOLD);
/* If there is no code point that is part of a multi-char
* fold, then there aren't any matches, so we don't do this
* optimization. Otherwise, it could match depending on
* the context around us, so we do upgrade */
_invlist_intersection(PL_InMultiCharFold, cp_list, &in_multis);
if (UNLIKELY(_invlist_len(in_multis) != 0)) {
REQUIRE_UTF8(flagp);
}
else {
op = END;
}
}
if (op != END) {
U8 len = (UTF) ? UVCHR_SKIP(value) : 1;
ret = regnode_guts(pRExC_state, op, len, "exact");
FILL_NODE(ret, op);
RExC_emit += 1 + STR_SZ(len);
STR_LEN(REGNODE_p(ret)) = len;
if (len == 1) {
*STRING(REGNODE_p(ret)) = (U8) value;
}
else {
uvchr_to_utf8((U8 *) STRING(REGNODE_p(ret)), value);
}
goto not_anyof;
}
}
}
if (! has_runtime_dependency) {
/* See if this can be turned into an ANYOFM node. Think about the
* bit patterns in two different bytes. In some positions, the
* bits in each will be 1; and in other positions both will be 0;
* and in some positions the bit will be 1 in one byte, and 0 in
* the other. Let 'n' be the number of positions where the bits
* differ. We create a mask which has exactly 'n' 0 bits, each in
* a position where the two bytes differ. Now take the set of all
* bytes that when ANDed with the mask yield the same result. That
* set has 2**n elements, and is representable by just two 8 bit
* numbers: the result and the mask. Importantly, matching the set
* can be vectorized by creating a word full of the result bytes,
* and a word full of the mask bytes, yielding a significant speed
* up. Here, see if this node matches such a set. As a concrete
* example consider [01], and the byte representing '0' which is
* 0x30 on ASCII machines. It has the bits 0011 0000. Take the
* mask 1111 1110. If we AND 0x31 and 0x30 with that mask we get
* 0x30. Any other bytes ANDed yield something else. So [01],
* which is a common usage, is optimizable into ANYOFM, and can
* benefit from the speed up. We can only do this on UTF-8
* invariant bytes, because they have the same bit patterns under
* UTF-8 as not. */
PERL_UINT_FAST8_T inverted = 0;
#ifdef EBCDIC
const PERL_UINT_FAST8_T max_permissible = 0xFF;
#else
const PERL_UINT_FAST8_T max_permissible = 0x7F;
#endif
/* If doesn't fit the criteria for ANYOFM, invert and try again.
* If that works we will instead later generate an NANYOFM, and
* invert back when through */
if (invlist_highest(cp_list) > max_permissible) {
_invlist_invert(cp_list);
inverted = 1;
}
if (invlist_highest(cp_list) <= max_permissible) {
UV this_start, this_end;
UV lowest_cp = UV_MAX; /* inited to suppress compiler warn */
U8 bits_differing = 0;
Size_t full_cp_count = 0;
bool first_time = TRUE;
/* Go through the bytes and find the bit positions that differ
* */
invlist_iterinit(cp_list);
while (invlist_iternext(cp_list, &this_start, &this_end)) {
unsigned int i = this_start;
if (first_time) {
if (! UVCHR_IS_INVARIANT(i)) {
goto done_anyofm;
}
first_time = FALSE;
lowest_cp = this_start;
/* We have set up the code point to compare with.
* Don't compare it with itself */
i++;
}
/* Find the bit positions that differ from the lowest code
* point in the node. Keep track of all such positions by
* OR'ing */
for (; i <= this_end; i++) {
if (! UVCHR_IS_INVARIANT(i)) {
goto done_anyofm;
}
bits_differing |= i ^ lowest_cp;
}
full_cp_count += this_end - this_start + 1;
}
invlist_iterfinish(cp_list);
/* At the end of the loop, we count how many bits differ from
* the bits in lowest code point, call the count 'd'. If the
* set we found contains 2**d elements, it is the closure of
* all code points that differ only in those bit positions. To
* convince yourself of that, first note that the number in the
* closure must be a power of 2, which we test for. The only
* way we could have that count and it be some differing set,
* is if we got some code points that don't differ from the
* lowest code point in any position, but do differ from each
* other in some other position. That means one code point has
* a 1 in that position, and another has a 0. But that would
* mean that one of them differs from the lowest code point in
* that position, which possibility we've already excluded. */
if ( (inverted || full_cp_count > 1)
&& full_cp_count == 1U << PL_bitcount[bits_differing])
{
U8 ANYOFM_mask;
op = ANYOFM + inverted;;
/* We need to make the bits that differ be 0's */
ANYOFM_mask = ~ bits_differing; /* This goes into FLAGS */
/* The argument is the lowest code point */
ret = reganode(pRExC_state, op, lowest_cp);
FLAGS(REGNODE_p(ret)) = ANYOFM_mask;
}
}
done_anyofm:
if (inverted) {
_invlist_invert(cp_list);
}
if (op != END) {
goto not_anyof;
}
}
if (! (anyof_flags & ANYOF_LOCALE_FLAGS)) {
PERL_UINT_FAST8_T type;
SV * intersection = NULL;
SV* d_invlist = NULL;
/* See if this matches any of the POSIX classes. The POSIXA and
* POSIXD ones are about the same speed as ANYOF ops, but take less
* room; the ones that have above-Latin1 code point matches are
* somewhat faster than ANYOF. */
for (type = POSIXA; type >= POSIXD; type--) {
int posix_class;
if (type == POSIXL) { /* But not /l posix classes */
continue;
}
for (posix_class = 0;
posix_class <= _HIGHEST_REGCOMP_DOT_H_SYNC;
posix_class++)
{
SV** our_code_points = &cp_list;
SV** official_code_points;
int try_inverted;
if (type == POSIXA) {
official_code_points = &PL_Posix_ptrs[posix_class];
}
else {
official_code_points = &PL_XPosix_ptrs[posix_class];
}
/* Skip non-existent classes of this type. e.g. \v only
* has an entry in PL_XPosix_ptrs */
if (! *official_code_points) {
continue;
}
/* Try both the regular class, and its inversion */
for (try_inverted = 0; try_inverted < 2; try_inverted++) {
bool this_inverted = invert ^ try_inverted;
if (type != POSIXD) {
/* This class that isn't /d can't match if we have
* /d dependencies */
if (has_runtime_dependency
& HAS_D_RUNTIME_DEPENDENCY)
{
continue;
}
}
else /* is /d */ if (! this_inverted) {
/* /d classes don't match anything non-ASCII below
* 256 unconditionally (which cp_list contains) */
_invlist_intersection(cp_list, PL_UpperLatin1,
&intersection);
if (_invlist_len(intersection) != 0) {
continue;
}
SvREFCNT_dec(d_invlist);
d_invlist = invlist_clone(cp_list, NULL);
/* But under UTF-8 it turns into using /u rules.
* Add the things it matches under these conditions
* so that we check below that these are identical
* to what the tested class should match */
if (upper_latin1_only_utf8_matches) {
_invlist_union(
d_invlist,
upper_latin1_only_utf8_matches,
&d_invlist);
}
our_code_points = &d_invlist;
}
else { /* POSIXD, inverted. If this doesn't have this
flag set, it isn't /d. */
if (! (anyof_flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER))
{
continue;
}
our_code_points = &cp_list;
}
/* Here, have weeded out some things. We want to see
* if the list of characters this node contains
* ('*our_code_points') precisely matches those of the
* class we are currently checking against
* ('*official_code_points'). */
if (_invlistEQ(*our_code_points,
*official_code_points,
try_inverted))
{
/* Here, they precisely match. Optimize this ANYOF
* node into its equivalent POSIX one of the
* correct type, possibly inverted */
ret = reg_node(pRExC_state, (try_inverted)
? type + NPOSIXA
- POSIXA
: type);
FLAGS(REGNODE_p(ret)) = posix_class;
SvREFCNT_dec(d_invlist);
SvREFCNT_dec(intersection);
goto not_anyof;
}
}
}
}
SvREFCNT_dec(d_invlist);
SvREFCNT_dec(intersection);
}
/* If didn't find an optimization and there is no need for a
* bitmap, optimize to indicate that */
if ( start[0] >= NUM_ANYOF_CODE_POINTS
&& ! LOC
&& ! upper_latin1_only_utf8_matches
&& anyof_flags == 0)
{
UV highest_cp = invlist_highest(cp_list);
/* If the lowest and highest code point in the class have the same
* UTF-8 first byte, then all do, and we can store that byte for
* regexec.c to use so that it can more quickly scan the target
* string for potential matches for this class. We co-opt the the
* flags field for this. Zero means, they don't have the same
* first byte. We do accept here very large code points (for
* future use), but don't bother with this optimization for them,
* as it would cause other complications */
if (highest_cp > IV_MAX) {
anyof_flags = 0;
}
else {
U8 low_utf8[UTF8_MAXBYTES+1];
U8 high_utf8[UTF8_MAXBYTES+1];
(void) uvchr_to_utf8(low_utf8, start[0]);
(void) uvchr_to_utf8(high_utf8, invlist_highest(cp_list));
anyof_flags = (low_utf8[0] == high_utf8[0])
? low_utf8[0]
: 0;
}
op = ANYOFH;
}
} /* End of seeing if can optimize it into a different node */
is_anyof: /* It's going to be an ANYOF node. */
if (op != ANYOFH) {
op = (has_runtime_dependency & HAS_D_RUNTIME_DEPENDENCY)
? ANYOFD
: ((posixl)
? ANYOFPOSIXL
: ((LOC)
? ANYOFL
: ANYOF));
}
ret = regnode_guts(pRExC_state, op, regarglen[op], "anyof");
FILL_NODE(ret, op); /* We set the argument later */
RExC_emit += 1 + regarglen[op];
ANYOF_FLAGS(REGNODE_p(ret)) = anyof_flags;
/* Here, <cp_list> contains all the code points we can determine at
* compile time that match under all conditions. Go through it, and
* for things that belong in the bitmap, put them there, and delete from
* <cp_list>. While we are at it, see if everything above 255 is in the
* list, and if so, set a flag to speed up execution */
populate_ANYOF_from_invlist(REGNODE_p(ret), &cp_list);
if (posixl) {
ANYOF_POSIXL_SET_TO_BITMAP(REGNODE_p(ret), posixl);
}
if (invert) {
ANYOF_FLAGS(REGNODE_p(ret)) |= ANYOF_INVERT;
}
/* Here, the bitmap has been populated with all the Latin1 code points that
* always match. Can now add to the overall list those that match only
* when the target string is UTF-8 (<upper_latin1_only_utf8_matches>).
* */
if (upper_latin1_only_utf8_matches) {
if (cp_list) {
_invlist_union(cp_list,
upper_latin1_only_utf8_matches,
&cp_list);
SvREFCNT_dec_NN(upper_latin1_only_utf8_matches);
}
else {
cp_list = upper_latin1_only_utf8_matches;
}
ANYOF_FLAGS(REGNODE_p(ret)) |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP;
}
set_ANYOF_arg(pRExC_state, REGNODE_p(ret), cp_list,
(HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION)
? listsv : NULL,
only_utf8_locale_list);
return ret;
not_anyof:
/* Here, the node is getting optimized into something that's not an ANYOF
* one. Finish up. */
Set_Node_Offset_Length(REGNODE_p(ret), orig_parse - RExC_start,
RExC_parse - orig_parse);;
SvREFCNT_dec(cp_list);;
return ret;
}
#undef HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION
STATIC void
S_set_ANYOF_arg(pTHX_ RExC_state_t* const pRExC_state,
regnode* const node,
SV* const cp_list,
SV* const runtime_defns,
SV* const only_utf8_locale_list)
{
/* Sets the arg field of an ANYOF-type node 'node', using information about
* the node passed-in. If there is nothing outside the node's bitmap, the
* arg is set to ANYOF_ONLY_HAS_BITMAP. Otherwise, it sets the argument to
* the count returned by add_data(), having allocated and stored an array,
* av, as follows:
*
* av[0] stores the inversion list defining this class as far as known at
* this time, or PL_sv_undef if nothing definite is now known.
* av[1] stores the inversion list of code points that match only if the
* current locale is UTF-8, or if none, PL_sv_undef if there is an
* av[2], or no entry otherwise.
* av[2] stores the list of user-defined properties whose subroutine
* definitions aren't known at this time, or no entry if none. */
UV n;
PERL_ARGS_ASSERT_SET_ANYOF_ARG;
if (! cp_list && ! runtime_defns && ! only_utf8_locale_list) {
assert(! (ANYOF_FLAGS(node)
& ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP));
ARG_SET(node, ANYOF_ONLY_HAS_BITMAP);
}
else {
AV * const av = newAV();
SV *rv;
if (cp_list) {
av_store(av, INVLIST_INDEX, cp_list);
}
if (only_utf8_locale_list) {
av_store(av, ONLY_LOCALE_MATCHES_INDEX, only_utf8_locale_list);
}
if (runtime_defns) {
av_store(av, DEFERRED_USER_DEFINED_INDEX, SvREFCNT_inc(runtime_defns));
}
rv = newRV_noinc(MUTABLE_SV(av));
n = add_data(pRExC_state, STR_WITH_LEN("s"));
RExC_rxi->data->data[n] = (void*)rv;
ARG_SET(node, n);
}
}
#if !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION)
SV *
Perl__get_regclass_nonbitmap_data(pTHX_ const regexp *prog,
const regnode* node,
bool doinit,
SV** listsvp,
SV** only_utf8_locale_ptr,
SV** output_invlist)
{
/* For internal core use only.
* Returns the inversion list for the input 'node' in the regex 'prog'.
* If <doinit> is 'true', will attempt to create the inversion list if not
* already done.
* If <listsvp> is non-null, will return the printable contents of the
* property definition. This can be used to get debugging information
* even before the inversion list exists, by calling this function with
* 'doinit' set to false, in which case the components that will be used
* to eventually create the inversion list are returned (in a printable
* form).
* If <only_utf8_locale_ptr> is not NULL, it is where this routine is to
* store an inversion list of code points that should match only if the
* execution-time locale is a UTF-8 one.
* If <output_invlist> is not NULL, it is where this routine is to store an
* inversion list of the code points that would be instead returned in
* <listsvp> if this were NULL. Thus, what gets output in <listsvp>
* when this parameter is used, is just the non-code point data that
* will go into creating the inversion list. This currently should be just
* user-defined properties whose definitions were not known at compile
* time. Using this parameter allows for easier manipulation of the
* inversion list's data by the caller. It is illegal to call this
* function with this parameter set, but not <listsvp>
*
* Tied intimately to how S_set_ANYOF_arg sets up the data structure. Note
* that, in spite of this function's name, the inversion list it returns
* may include the bitmap data as well */
SV *si = NULL; /* Input initialization string */
SV* invlist = NULL;
RXi_GET_DECL(prog, progi);
const struct reg_data * const data = prog ? progi->data : NULL;
PERL_ARGS_ASSERT__GET_REGCLASS_NONBITMAP_DATA;
assert(! output_invlist || listsvp);
if (data && data->count) {
const U32 n = ARG(node);
if (data->what[n] == 's') {
SV * const rv = MUTABLE_SV(data->data[n]);
AV * const av = MUTABLE_AV(SvRV(rv));
SV **const ary = AvARRAY(av);
invlist = ary[INVLIST_INDEX];
if (av_tindex_skip_len_mg(av) >= ONLY_LOCALE_MATCHES_INDEX) {
*only_utf8_locale_ptr = ary[ONLY_LOCALE_MATCHES_INDEX];
}
if (av_tindex_skip_len_mg(av) >= DEFERRED_USER_DEFINED_INDEX) {
si = ary[DEFERRED_USER_DEFINED_INDEX];
}
if (doinit && (si || invlist)) {
if (si) {
bool user_defined;
SV * msg = newSVpvs_flags("", SVs_TEMP);
SV * prop_definition = handle_user_defined_property(
"", 0, FALSE, /* There is no \p{}, \P{} */
SvPVX_const(si)[1] - '0', /* /i or not has been
stored here for just
this occasion */
TRUE, /* run time */
FALSE, /* This call must find the defn */
si, /* The property definition */
&user_defined,
msg,
0 /* base level call */
);
if (SvCUR(msg)) {
assert(prop_definition == NULL);
Perl_croak(aTHX_ "%" UTF8f,
UTF8fARG(SvUTF8(msg), SvCUR(msg), SvPVX(msg)));
}
if (invlist) {
_invlist_union(invlist, prop_definition, &invlist);
SvREFCNT_dec_NN(prop_definition);
}
else {
invlist = prop_definition;
}
STATIC_ASSERT_STMT(ONLY_LOCALE_MATCHES_INDEX == 1 + INVLIST_INDEX);
STATIC_ASSERT_STMT(DEFERRED_USER_DEFINED_INDEX == 1 + ONLY_LOCALE_MATCHES_INDEX);
av_store(av, INVLIST_INDEX, invlist);
av_fill(av, (ary[ONLY_LOCALE_MATCHES_INDEX])
? ONLY_LOCALE_MATCHES_INDEX:
INVLIST_INDEX);
si = NULL;
}
}
}
}
/* If requested, return a printable version of what this ANYOF node matches
* */
if (listsvp) {
SV* matches_string = NULL;
/* This function can be called at compile-time, before everything gets
* resolved, in which case we return the currently best available
* information, which is the string that will eventually be used to do
* that resolving, 'si' */
if (si) {
/* Here, we only have 'si' (and possibly some passed-in data in
* 'invlist', which is handled below) If the caller only wants
* 'si', use that. */
if (! output_invlist) {
matches_string = newSVsv(si);
}
else {
/* But if the caller wants an inversion list of the node, we
* need to parse 'si' and place as much as possible in the
* desired output inversion list, making 'matches_string' only
* contain the currently unresolvable things */
const char *si_string = SvPVX(si);
STRLEN remaining = SvCUR(si);
UV prev_cp = 0;
U8 count = 0;
/* Ignore everything before the first new-line */
while (*si_string != '\n' && remaining > 0) {
si_string++;
remaining--;
}
assert(remaining > 0);
si_string++;
remaining--;
while (remaining > 0) {
/* The data consists of just strings defining user-defined
* property names, but in prior incarnations, and perhaps
* somehow from pluggable regex engines, it could still
* hold hex code point definitions. Each component of a
* range would be separated by a tab, and each range by a
* new-line. If these are found, instead add them to the
* inversion list */
I32 grok_flags = PERL_SCAN_SILENT_ILLDIGIT
|PERL_SCAN_SILENT_NON_PORTABLE;
STRLEN len = remaining;
UV cp = grok_hex(si_string, &len, &grok_flags, NULL);
/* If the hex decode routine found something, it should go
* up to the next \n */
if ( *(si_string + len) == '\n') {
if (count) { /* 2nd code point on line */
*output_invlist = _add_range_to_invlist(*output_invlist, prev_cp, cp);
}
else {
*output_invlist = add_cp_to_invlist(*output_invlist, cp);
}
count = 0;
goto prepare_for_next_iteration;
}
/* If the hex decode was instead for the lower range limit,
* save it, and go parse the upper range limit */
if (*(si_string + len) == '\t') {
assert(count == 0);
prev_cp = cp;
count = 1;
prepare_for_next_iteration:
si_string += len + 1;
remaining -= len + 1;
continue;
}
/* Here, didn't find a legal hex number. Just add it from
* here to the next \n */
remaining -= len;
while (*(si_string + len) != '\n' && remaining > 0) {
remaining--;
len++;
}
if (*(si_string + len) == '\n') {
len++;
remaining--;
}
if (matches_string) {
sv_catpvn(matches_string, si_string, len - 1);
}
else {
matches_string = newSVpvn(si_string, len - 1);
}
si_string += len;
sv_catpvs(matches_string, " ");
} /* end of loop through the text */
assert(matches_string);
if (SvCUR(matches_string)) { /* Get rid of trailing blank */
SvCUR_set(matches_string, SvCUR(matches_string) - 1);
}
} /* end of has an 'si' */
}
/* Add the stuff that's already known */
if (invlist) {
/* Again, if the caller doesn't want the output inversion list, put
* everything in 'matches-string' */
if (! output_invlist) {
if ( ! matches_string) {
matches_string = newSVpvs("\n");
}
sv_catsv(matches_string, invlist_contents(invlist,
TRUE /* traditional style */
));
}
else if (! *output_invlist) {
*output_invlist = invlist_clone(invlist, NULL);
}
else {
_invlist_union(*output_invlist, invlist, output_invlist);
}
}
*listsvp = matches_string;
}
return invlist;
}
#endif /* !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION) */
/* reg_skipcomment()
Absorbs an /x style # comment from the input stream,
returning a pointer to the first character beyond the comment, or if the
comment terminates the pattern without anything following it, this returns
one past the final character of the pattern (in other words, RExC_end) and
sets the REG_RUN_ON_COMMENT_SEEN flag.
Note it's the callers responsibility to ensure that we are
actually in /x mode
*/
PERL_STATIC_INLINE char*
S_reg_skipcomment(RExC_state_t *pRExC_state, char* p)
{
PERL_ARGS_ASSERT_REG_SKIPCOMMENT;
assert(*p == '#');
while (p < RExC_end) {
if (*(++p) == '\n') {
return p+1;
}
}
/* we ran off the end of the pattern without ending the comment, so we have
* to add an \n when wrapping */
RExC_seen |= REG_RUN_ON_COMMENT_SEEN;
return p;
}
STATIC void
S_skip_to_be_ignored_text(pTHX_ RExC_state_t *pRExC_state,
char ** p,
const bool force_to_xmod
)
{
/* If the text at the current parse position '*p' is a '(?#...)' comment,
* or if we are under /x or 'force_to_xmod' is TRUE, and the text at '*p'
* is /x whitespace, advance '*p' so that on exit it points to the first
* byte past all such white space and comments */
const bool use_xmod = force_to_xmod || (RExC_flags & RXf_PMf_EXTENDED);
PERL_ARGS_ASSERT_SKIP_TO_BE_IGNORED_TEXT;
assert( ! UTF || UTF8_IS_INVARIANT(**p) || UTF8_IS_START(**p));
for (;;) {
if (RExC_end - (*p) >= 3
&& *(*p) == '('
&& *(*p + 1) == '?'
&& *(*p + 2) == '#')
{
while (*(*p) != ')') {
if ((*p) == RExC_end)
FAIL("Sequence (?#... not terminated");
(*p)++;
}
(*p)++;
continue;
}
if (use_xmod) {
const char * save_p = *p;
while ((*p) < RExC_end) {
STRLEN len;
if ((len = is_PATWS_safe((*p), RExC_end, UTF))) {
(*p) += len;
}
else if (*(*p) == '#') {
(*p) = reg_skipcomment(pRExC_state, (*p));
}
else {
break;
}
}
if (*p != save_p) {
continue;
}
}
break;
}
return;
}
/* nextchar()
Advances the parse position by one byte, unless that byte is the beginning
of a '(?#...)' style comment, or is /x whitespace and /x is in effect. In
those two cases, the parse position is advanced beyond all such comments and
white space.
This is the UTF, (?#...), and /x friendly way of saying RExC_parse++.
*/
STATIC void
S_nextchar(pTHX_ RExC_state_t *pRExC_state)
{
PERL_ARGS_ASSERT_NEXTCHAR;
if (RExC_parse < RExC_end) {
assert( ! UTF
|| UTF8_IS_INVARIANT(*RExC_parse)
|| UTF8_IS_START(*RExC_parse));
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force /x */ );
}
}
STATIC void
S_change_engine_size(pTHX_ RExC_state_t *pRExC_state, const Ptrdiff_t size)
{
/* 'size' is the delta to add or subtract from the current memory allocated
* to the regex engine being constructed */
PERL_ARGS_ASSERT_CHANGE_ENGINE_SIZE;
RExC_size += size;
Renewc(RExC_rxi,
sizeof(regexp_internal) + (RExC_size + 1) * sizeof(regnode),
/* +1 for REG_MAGIC */
char,
regexp_internal);
if ( RExC_rxi == NULL )
FAIL("Regexp out of space");
RXi_SET(RExC_rx, RExC_rxi);
RExC_emit_start = RExC_rxi->program;
if (size > 0) {
Zero(REGNODE_p(RExC_emit), size, regnode);
}
#ifdef RE_TRACK_PATTERN_OFFSETS
Renew(RExC_offsets, 2*RExC_size+1, U32);
if (size > 0) {
Zero(RExC_offsets + 2*(RExC_size - size) + 1, 2 * size, U32);
}
RExC_offsets[0] = RExC_size;
#endif
}
STATIC regnode_offset
S_regnode_guts(pTHX_ RExC_state_t *pRExC_state, const U8 op, const STRLEN extra_size, const char* const name)
{
/* Allocate a regnode for 'op', with 'extra_size' extra space. It aligns
* and increments RExC_size and RExC_emit
*
* It returns the regnode's offset into the regex engine program */
const regnode_offset ret = RExC_emit;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGNODE_GUTS;
SIZE_ALIGN(RExC_size);
change_engine_size(pRExC_state, (Ptrdiff_t) 1 + extra_size);
NODE_ALIGN_FILL(REGNODE_p(ret));
#ifndef RE_TRACK_PATTERN_OFFSETS
PERL_UNUSED_ARG(name);
PERL_UNUSED_ARG(op);
#else
assert(extra_size >= regarglen[op] || PL_regkind[op] == ANYOF);
if (RExC_offsets) { /* MJD */
MJD_OFFSET_DEBUG(
("%s:%d: (op %s) %s %" UVuf " (len %" UVuf ") (max %" UVuf ").\n",
name, __LINE__,
PL_reg_name[op],
(UV)(RExC_emit) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)(RExC_emit),
(UV)(RExC_parse - RExC_start),
(UV)RExC_offsets[0]));
Set_Node_Offset(REGNODE_p(RExC_emit), RExC_parse + (op == END));
}
#endif
return(ret);
}
/*
- reg_node - emit a node
*/
STATIC regnode_offset /* Location. */
S_reg_node(pTHX_ RExC_state_t *pRExC_state, U8 op)
{
const regnode_offset ret = regnode_guts(pRExC_state, op, regarglen[op], "reg_node");
regnode_offset ptr = ret;
PERL_ARGS_ASSERT_REG_NODE;
assert(regarglen[op] == 0);
FILL_ADVANCE_NODE(ptr, op);
RExC_emit = ptr;
return(ret);
}
/*
- reganode - emit a node with an argument
*/
STATIC regnode_offset /* Location. */
S_reganode(pTHX_ RExC_state_t *pRExC_state, U8 op, U32 arg)
{
const regnode_offset ret = regnode_guts(pRExC_state, op, regarglen[op], "reganode");
regnode_offset ptr = ret;
PERL_ARGS_ASSERT_REGANODE;
/* ANYOF are special cased to allow non-length 1 args */
assert(regarglen[op] == 1);
FILL_ADVANCE_NODE_ARG(ptr, op, arg);
RExC_emit = ptr;
return(ret);
}
STATIC regnode_offset
S_reg2Lanode(pTHX_ RExC_state_t *pRExC_state, const U8 op, const U32 arg1, const I32 arg2)
{
/* emit a node with U32 and I32 arguments */
const regnode_offset ret = regnode_guts(pRExC_state, op, regarglen[op], "reg2Lanode");
regnode_offset ptr = ret;
PERL_ARGS_ASSERT_REG2LANODE;
assert(regarglen[op] == 2);
FILL_ADVANCE_NODE_2L_ARG(ptr, op, arg1, arg2);
RExC_emit = ptr;
return(ret);
}
/*
- reginsert - insert an operator in front of already-emitted operand
*
* That means that on exit 'operand' is the offset of the newly inserted
* operator, and the original operand has been relocated.
*
* IMPORTANT NOTE - it is the *callers* responsibility to correctly
* set up NEXT_OFF() of the inserted node if needed. Something like this:
*
* reginsert(pRExC, OPFAIL, orig_emit, depth+1);
* NEXT_OFF(orig_emit) = regarglen[OPFAIL] + NODE_STEP_REGNODE;
*
* ALSO NOTE - FLAGS(newly-inserted-operator) will be set to 0 as well.
*/
STATIC void
S_reginsert(pTHX_ RExC_state_t *pRExC_state, const U8 op,
const regnode_offset operand, const U32 depth)
{
regnode *src;
regnode *dst;
regnode *place;
const int offset = regarglen[(U8)op];
const int size = NODE_STEP_REGNODE + offset;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGINSERT;
PERL_UNUSED_CONTEXT;
PERL_UNUSED_ARG(depth);
/* (PL_regkind[(U8)op] == CURLY ? EXTRA_STEP_2ARGS : 0); */
DEBUG_PARSE_FMT("inst"," - %s", PL_reg_name[op]);
assert(!RExC_study_started); /* I believe we should never use reginsert once we have started
studying. If this is wrong then we need to adjust RExC_recurse
below like we do with RExC_open_parens/RExC_close_parens. */
change_engine_size(pRExC_state, (Ptrdiff_t) size);
src = REGNODE_p(RExC_emit);
RExC_emit += size;
dst = REGNODE_p(RExC_emit);
/* If we are in a "count the parentheses" pass, the numbers are unreliable,
* and [perl #133871] shows this can lead to problems, so skip this
* realignment of parens until a later pass when they are reliable */
if (! IN_PARENS_PASS && RExC_open_parens) {
int paren;
/*DEBUG_PARSE_FMT("inst"," - %" IVdf, (IV)RExC_npar);*/
/* remember that RExC_npar is rex->nparens + 1,
* iow it is 1 more than the number of parens seen in
* the pattern so far. */
for ( paren=0 ; paren < RExC_npar ; paren++ ) {
/* note, RExC_open_parens[0] is the start of the
* regex, it can't move. RExC_close_parens[0] is the end
* of the regex, it *can* move. */
if ( paren && RExC_open_parens[paren] >= operand ) {
/*DEBUG_PARSE_FMT("open"," - %d", size);*/
RExC_open_parens[paren] += size;
} else {
/*DEBUG_PARSE_FMT("open"," - %s","ok");*/
}
if ( RExC_close_parens[paren] >= operand ) {
/*DEBUG_PARSE_FMT("close"," - %d", size);*/
RExC_close_parens[paren] += size;
} else {
/*DEBUG_PARSE_FMT("close"," - %s","ok");*/
}
}
}
if (RExC_end_op)
RExC_end_op += size;
while (src > REGNODE_p(operand)) {
StructCopy(--src, --dst, regnode);
#ifdef RE_TRACK_PATTERN_OFFSETS
if (RExC_offsets) { /* MJD 20010112 */
MJD_OFFSET_DEBUG(
("%s(%d): (op %s) %s copy %" UVuf " -> %" UVuf " (max %" UVuf ").\n",
"reginsert",
__LINE__,
PL_reg_name[op],
(UV)(REGNODE_OFFSET(dst)) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)REGNODE_OFFSET(src),
(UV)REGNODE_OFFSET(dst),
(UV)RExC_offsets[0]));
Set_Node_Offset_To_R(REGNODE_OFFSET(dst), Node_Offset(src));
Set_Node_Length_To_R(REGNODE_OFFSET(dst), Node_Length(src));
}
#endif
}
place = REGNODE_p(operand); /* Op node, where operand used to be. */
#ifdef RE_TRACK_PATTERN_OFFSETS
if (RExC_offsets) { /* MJD */
MJD_OFFSET_DEBUG(
("%s(%d): (op %s) %s %" UVuf " <- %" UVuf " (max %" UVuf ").\n",
"reginsert",
__LINE__,
PL_reg_name[op],
(UV)REGNODE_OFFSET(place) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)REGNODE_OFFSET(place),
(UV)(RExC_parse - RExC_start),
(UV)RExC_offsets[0]));
Set_Node_Offset(place, RExC_parse);
Set_Node_Length(place, 1);
}
#endif
src = NEXTOPER(place);
FLAGS(place) = 0;
FILL_NODE(operand, op);
/* Zero out any arguments in the new node */
Zero(src, offset, regnode);
}
/*
- regtail - set the next-pointer at the end of a node chain of p to val. If
that value won't fit in the space available, instead returns FALSE.
(Except asserts if we can't fit in the largest space the regex
engine is designed for.)
- SEE ALSO: regtail_study
*/
STATIC bool
S_regtail(pTHX_ RExC_state_t * pRExC_state,
const regnode_offset p,
const regnode_offset val,
const U32 depth)
{
regnode_offset scan;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGTAIL;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
/* Find last node. */
scan = (regnode_offset) p;
for (;;) {
regnode * const temp = regnext(REGNODE_p(scan));
DEBUG_PARSE_r({
DEBUG_PARSE_MSG((scan==p ? "tail" : ""));
regprop(RExC_rx, RExC_mysv, REGNODE_p(scan), NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ %s (%d) %s %s\n",
SvPV_nolen_const(RExC_mysv), scan,
(temp == NULL ? "->" : ""),
(temp == NULL ? PL_reg_name[OP(REGNODE_p(val))] : "")
);
});
if (temp == NULL)
break;
scan = REGNODE_OFFSET(temp);
}
if (reg_off_by_arg[OP(REGNODE_p(scan))]) {
assert((UV) (val - scan) <= U32_MAX);
ARG_SET(REGNODE_p(scan), val - scan);
}
else {
if (val - scan > U16_MAX) {
/* Populate this with something that won't loop and will likely
* lead to a crash if the caller ignores the failure return, and
* execution continues */
NEXT_OFF(REGNODE_p(scan)) = U16_MAX;
return FALSE;
}
NEXT_OFF(REGNODE_p(scan)) = val - scan;
}
return TRUE;
}
#ifdef DEBUGGING
/*
- regtail_study - set the next-pointer at the end of a node chain of p to val.
- Look for optimizable sequences at the same time.
- currently only looks for EXACT chains.
This is experimental code. The idea is to use this routine to perform
in place optimizations on branches and groups as they are constructed,
with the long term intention of removing optimization from study_chunk so
that it is purely analytical.
Currently only used when in DEBUG mode. The macro REGTAIL_STUDY() is used
to control which is which.
This used to return a value that was ignored. It was a problem that it is
#ifdef'd to be another function that didn't return a value. khw has changed it
so both currently return a pass/fail return.
*/
/* TODO: All four parms should be const */
STATIC bool
S_regtail_study(pTHX_ RExC_state_t *pRExC_state, regnode_offset p,
const regnode_offset val, U32 depth)
{
regnode_offset scan;
U8 exact = PSEUDO;
#ifdef EXPERIMENTAL_INPLACESCAN
I32 min = 0;
#endif
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGTAIL_STUDY;
/* Find last node. */
scan = p;
for (;;) {
regnode * const temp = regnext(REGNODE_p(scan));
#ifdef EXPERIMENTAL_INPLACESCAN
if (PL_regkind[OP(REGNODE_p(scan))] == EXACT) {
bool unfolded_multi_char; /* Unexamined in this routine */
if (join_exact(pRExC_state, scan, &min,
&unfolded_multi_char, 1, REGNODE_p(val), depth+1))
return TRUE; /* Was return EXACT */
}
#endif
if ( exact ) {
switch (OP(REGNODE_p(scan))) {
case EXACT:
case EXACT_ONLY8:
case EXACTL:
case EXACTF:
case EXACTFU_S_EDGE:
case EXACTFAA_NO_TRIE:
case EXACTFAA:
case EXACTFU:
case EXACTFU_ONLY8:
case EXACTFLU8:
case EXACTFUP:
case EXACTFL:
if( exact == PSEUDO )
exact= OP(REGNODE_p(scan));
else if ( exact != OP(REGNODE_p(scan)) )
exact= 0;
case NOTHING:
break;
default:
exact= 0;
}
}
DEBUG_PARSE_r({
DEBUG_PARSE_MSG((scan==p ? "tsdy" : ""));
regprop(RExC_rx, RExC_mysv, REGNODE_p(scan), NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ %s (%d) -> %s\n",
SvPV_nolen_const(RExC_mysv),
scan,
PL_reg_name[exact]);
});
if (temp == NULL)
break;
scan = REGNODE_OFFSET(temp);
}
DEBUG_PARSE_r({
DEBUG_PARSE_MSG("");
regprop(RExC_rx, RExC_mysv, REGNODE_p(val), NULL, pRExC_state);
Perl_re_printf( aTHX_
"~ attach to %s (%" IVdf ") offset to %" IVdf "\n",
SvPV_nolen_const(RExC_mysv),
(IV)val,
(IV)(val - scan)
);
});
if (reg_off_by_arg[OP(REGNODE_p(scan))]) {
assert((UV) (val - scan) <= U32_MAX);
ARG_SET(REGNODE_p(scan), val - scan);
}
else {
if (val - scan > U16_MAX) {
/* Populate this with something that won't loop and will likely
* lead to a crash if the caller ignores the failure return, and
* execution continues */
NEXT_OFF(REGNODE_p(scan)) = U16_MAX;
return FALSE;
}
NEXT_OFF(REGNODE_p(scan)) = val - scan;
}
return TRUE; /* Was 'return exact' */
}
#endif
STATIC SV*
S_get_ANYOFM_contents(pTHX_ const regnode * n) {
/* Returns an inversion list of all the code points matched by the
* ANYOFM/NANYOFM node 'n' */
SV * cp_list = _new_invlist(-1);
const U8 lowest = (U8) ARG(n);
unsigned int i;
U8 count = 0;
U8 needed = 1U << PL_bitcount[ (U8) ~ FLAGS(n)];
PERL_ARGS_ASSERT_GET_ANYOFM_CONTENTS;
/* Starting with the lowest code point, any code point that ANDed with the
* mask yields the lowest code point is in the set */
for (i = lowest; i <= 0xFF; i++) {
if ((i & FLAGS(n)) == ARG(n)) {
cp_list = add_cp_to_invlist(cp_list, i);
count++;
/* We know how many code points (a power of two) that are in the
* set. No use looking once we've got that number */
if (count >= needed) break;
}
}
if (OP(n) == NANYOFM) {
_invlist_invert(cp_list);
}
return cp_list;
}
/*
- regdump - dump a regexp onto Perl_debug_log in vaguely comprehensible form
*/
#ifdef DEBUGGING
static void
S_regdump_intflags(pTHX_ const char *lead, const U32 flags)
{
int bit;
int set=0;
ASSUME(REG_INTFLAGS_NAME_SIZE <= sizeof(flags)*8);
for (bit=0; bit<REG_INTFLAGS_NAME_SIZE; bit++) {
if (flags & (1<<bit)) {
if (!set++ && lead)
Perl_re_printf( aTHX_ "%s", lead);
Perl_re_printf( aTHX_ "%s ", PL_reg_intflags_name[bit]);
}
}
if (lead) {
if (set)
Perl_re_printf( aTHX_ "\n");
else
Perl_re_printf( aTHX_ "%s[none-set]\n", lead);
}
}
static void
S_regdump_extflags(pTHX_ const char *lead, const U32 flags)
{
int bit;
int set=0;
regex_charset cs;
ASSUME(REG_EXTFLAGS_NAME_SIZE <= sizeof(flags)*8);
for (bit=0; bit<REG_EXTFLAGS_NAME_SIZE; bit++) {
if (flags & (1<<bit)) {
if ((1<<bit) & RXf_PMf_CHARSET) { /* Output separately, below */
continue;
}
if (!set++ && lead)
Perl_re_printf( aTHX_ "%s", lead);
Perl_re_printf( aTHX_ "%s ", PL_reg_extflags_name[bit]);
}
}
if ((cs = get_regex_charset(flags)) != REGEX_DEPENDS_CHARSET) {
if (!set++ && lead) {
Perl_re_printf( aTHX_ "%s", lead);
}
switch (cs) {
case REGEX_UNICODE_CHARSET:
Perl_re_printf( aTHX_ "UNICODE");
break;
case REGEX_LOCALE_CHARSET:
Perl_re_printf( aTHX_ "LOCALE");
break;
case REGEX_ASCII_RESTRICTED_CHARSET:
Perl_re_printf( aTHX_ "ASCII-RESTRICTED");
break;
case REGEX_ASCII_MORE_RESTRICTED_CHARSET:
Perl_re_printf( aTHX_ "ASCII-MORE_RESTRICTED");
break;
default:
Perl_re_printf( aTHX_ "UNKNOWN CHARACTER SET");
break;
}
}
if (lead) {
if (set)
Perl_re_printf( aTHX_ "\n");
else
Perl_re_printf( aTHX_ "%s[none-set]\n", lead);
}
}
#endif
void
Perl_regdump(pTHX_ const regexp *r)
{
#ifdef DEBUGGING
int i;
SV * const sv = sv_newmortal();
SV *dsv= sv_newmortal();
RXi_GET_DECL(r, ri);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGDUMP;
(void)dumpuntil(r, ri->program, ri->program + 1, NULL, NULL, sv, 0, 0);
/* Header fields of interest. */
for (i = 0; i < 2; i++) {
if (r->substrs->data[i].substr) {
RE_PV_QUOTED_DECL(s, 0, dsv,
SvPVX_const(r->substrs->data[i].substr),
RE_SV_DUMPLEN(r->substrs->data[i].substr),
PL_dump_re_max_len);
Perl_re_printf( aTHX_
"%s %s%s at %" IVdf "..%" UVuf " ",
i ? "floating" : "anchored",
s,
RE_SV_TAIL(r->substrs->data[i].substr),
(IV)r->substrs->data[i].min_offset,
(UV)r->substrs->data[i].max_offset);
}
else if (r->substrs->data[i].utf8_substr) {
RE_PV_QUOTED_DECL(s, 1, dsv,
SvPVX_const(r->substrs->data[i].utf8_substr),
RE_SV_DUMPLEN(r->substrs->data[i].utf8_substr),
30);
Perl_re_printf( aTHX_
"%s utf8 %s%s at %" IVdf "..%" UVuf " ",
i ? "floating" : "anchored",
s,
RE_SV_TAIL(r->substrs->data[i].utf8_substr),
(IV)r->substrs->data[i].min_offset,
(UV)r->substrs->data[i].max_offset);
}
}
if (r->check_substr || r->check_utf8)
Perl_re_printf( aTHX_
(const char *)
( r->check_substr == r->substrs->data[1].substr
&& r->check_utf8 == r->substrs->data[1].utf8_substr
? "(checking floating" : "(checking anchored"));
if (r->intflags & PREGf_NOSCAN)
Perl_re_printf( aTHX_ " noscan");
if (r->extflags & RXf_CHECK_ALL)
Perl_re_printf( aTHX_ " isall");
if (r->check_substr || r->check_utf8)
Perl_re_printf( aTHX_ ") ");
if (ri->regstclass) {
regprop(r, sv, ri->regstclass, NULL, NULL);
Perl_re_printf( aTHX_ "stclass %s ", SvPVX_const(sv));
}
if (r->intflags & PREGf_ANCH) {
Perl_re_printf( aTHX_ "anchored");
if (r->intflags & PREGf_ANCH_MBOL)
Perl_re_printf( aTHX_ "(MBOL)");
if (r->intflags & PREGf_ANCH_SBOL)
Perl_re_printf( aTHX_ "(SBOL)");
if (r->intflags & PREGf_ANCH_GPOS)
Perl_re_printf( aTHX_ "(GPOS)");
Perl_re_printf( aTHX_ " ");
}
if (r->intflags & PREGf_GPOS_SEEN)
Perl_re_printf( aTHX_ "GPOS:%" UVuf " ", (UV)r->gofs);
if (r->intflags & PREGf_SKIP)
Perl_re_printf( aTHX_ "plus ");
if (r->intflags & PREGf_IMPLICIT)
Perl_re_printf( aTHX_ "implicit ");
Perl_re_printf( aTHX_ "minlen %" IVdf " ", (IV)r->minlen);
if (r->extflags & RXf_EVAL_SEEN)
Perl_re_printf( aTHX_ "with eval ");
Perl_re_printf( aTHX_ "\n");
DEBUG_FLAGS_r({
regdump_extflags("r->extflags: ", r->extflags);
regdump_intflags("r->intflags: ", r->intflags);
});
#else
PERL_ARGS_ASSERT_REGDUMP;
PERL_UNUSED_CONTEXT;
PERL_UNUSED_ARG(r);
#endif /* DEBUGGING */
}
/* Should be synchronized with ANYOF_ #defines in regcomp.h */
#ifdef DEBUGGING
# if _CC_WORDCHAR != 0 || _CC_DIGIT != 1 || _CC_ALPHA != 2 \
|| _CC_LOWER != 3 || _CC_UPPER != 4 || _CC_PUNCT != 5 \
|| _CC_PRINT != 6 || _CC_ALPHANUMERIC != 7 || _CC_GRAPH != 8 \
|| _CC_CASED != 9 || _CC_SPACE != 10 || _CC_BLANK != 11 \
|| _CC_XDIGIT != 12 || _CC_CNTRL != 13 || _CC_ASCII != 14 \
|| _CC_VERTSPACE != 15
# error Need to adjust order of anyofs[]
# endif
static const char * const anyofs[] = {
"\\w",
"\\W",
"\\d",
"\\D",
"[:alpha:]",
"[:^alpha:]",
"[:lower:]",
"[:^lower:]",
"[:upper:]",
"[:^upper:]",
"[:punct:]",
"[:^punct:]",
"[:print:]",
"[:^print:]",
"[:alnum:]",
"[:^alnum:]",
"[:graph:]",
"[:^graph:]",
"[:cased:]",
"[:^cased:]",
"\\s",
"\\S",
"[:blank:]",
"[:^blank:]",
"[:xdigit:]",
"[:^xdigit:]",
"[:cntrl:]",
"[:^cntrl:]",
"[:ascii:]",
"[:^ascii:]",
"\\v",
"\\V"
};
#endif
/*
- regprop - printable representation of opcode, with run time support
*/
void
Perl_regprop(pTHX_ const regexp *prog, SV *sv, const regnode *o, const regmatch_info *reginfo, const RExC_state_t *pRExC_state)
{
#ifdef DEBUGGING
dVAR;
int k;
RXi_GET_DECL(prog, progi);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGPROP;
SvPVCLEAR(sv);
if (OP(o) > REGNODE_MAX) /* regnode.type is unsigned */
/* It would be nice to FAIL() here, but this may be called from
regexec.c, and it would be hard to supply pRExC_state. */
Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d",
(int)OP(o), (int)REGNODE_MAX);
sv_catpv(sv, PL_reg_name[OP(o)]); /* Take off const! */
k = PL_regkind[OP(o)];
if (k == EXACT) {
sv_catpvs(sv, " ");
/* Using is_utf8_string() (via PERL_PV_UNI_DETECT)
* is a crude hack but it may be the best for now since
* we have no flag "this EXACTish node was UTF-8"
* --jhi */
pv_pretty(sv, STRING(o), STR_LEN(o), PL_dump_re_max_len,
PL_colors[0], PL_colors[1],
PERL_PV_ESCAPE_UNI_DETECT |
PERL_PV_ESCAPE_NONASCII |
PERL_PV_PRETTY_ELLIPSES |
PERL_PV_PRETTY_LTGT |
PERL_PV_PRETTY_NOCLEAR
);
} else if (k == TRIE) {
/* print the details of the trie in dumpuntil instead, as
* progi->data isn't available here */
const char op = OP(o);
const U32 n = ARG(o);
const reg_ac_data * const ac = IS_TRIE_AC(op) ?
(reg_ac_data *)progi->data->data[n] :
NULL;
const reg_trie_data * const trie
= (reg_trie_data*)progi->data->data[!IS_TRIE_AC(op) ? n : ac->trie];
Perl_sv_catpvf(aTHX_ sv, "-%s", PL_reg_name[o->flags]);
DEBUG_TRIE_COMPILE_r({
if (trie->jump)
sv_catpvs(sv, "(JUMP)");
Perl_sv_catpvf(aTHX_ sv,
"<S:%" UVuf "/%" IVdf " W:%" UVuf " L:%" UVuf "/%" UVuf " C:%" UVuf "/%" UVuf ">",
(UV)trie->startstate,
(IV)trie->statecount-1, /* -1 because of the unused 0 element */
(UV)trie->wordcount,
(UV)trie->minlen,
(UV)trie->maxlen,
(UV)TRIE_CHARCOUNT(trie),
(UV)trie->uniquecharcount
);
});
if ( IS_ANYOF_TRIE(op) || trie->bitmap ) {
sv_catpvs(sv, "[");
(void) put_charclass_bitmap_innards(sv,
((IS_ANYOF_TRIE(op))
? ANYOF_BITMAP(o)
: TRIE_BITMAP(trie)),
NULL,
NULL,
NULL,
FALSE
);
sv_catpvs(sv, "]");
}
} else if (k == CURLY) {
U32 lo = ARG1(o), hi = ARG2(o);
if (OP(o) == CURLYM || OP(o) == CURLYN || OP(o) == CURLYX)
Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags); /* Parenth number */
Perl_sv_catpvf(aTHX_ sv, "{%u,", (unsigned) lo);
if (hi == REG_INFTY)
sv_catpvs(sv, "INFTY");
else
Perl_sv_catpvf(aTHX_ sv, "%u", (unsigned) hi);
sv_catpvs(sv, "}");
}
else if (k == WHILEM && o->flags) /* Ordinal/of */
Perl_sv_catpvf(aTHX_ sv, "[%d/%d]", o->flags & 0xf, o->flags>>4);
else if (k == REF || k == OPEN || k == CLOSE
|| k == GROUPP || OP(o)==ACCEPT)
{
AV *name_list= NULL;
U32 parno= OP(o) == ACCEPT ? (U32)ARG2L(o) : ARG(o);
Perl_sv_catpvf(aTHX_ sv, "%" UVuf, (UV)parno); /* Parenth number */
if ( RXp_PAREN_NAMES(prog) ) {
name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]);
} else if ( pRExC_state ) {
name_list= RExC_paren_name_list;
}
if (name_list) {
if ( k != REF || (OP(o) < NREF)) {
SV **name= av_fetch(name_list, parno, 0 );
if (name)
Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name));
}
else {
SV *sv_dat= MUTABLE_SV(progi->data->data[ parno ]);
I32 *nums=(I32*)SvPVX(sv_dat);
SV **name= av_fetch(name_list, nums[0], 0 );
I32 n;
if (name) {
for ( n=0; n<SvIVX(sv_dat); n++ ) {
Perl_sv_catpvf(aTHX_ sv, "%s%" IVdf,
(n ? "," : ""), (IV)nums[n]);
}
Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name));
}
}
}
if ( k == REF && reginfo) {
U32 n = ARG(o); /* which paren pair */
I32 ln = prog->offs[n].start;
if (prog->lastparen < n || ln == -1 || prog->offs[n].end == -1)
Perl_sv_catpvf(aTHX_ sv, ": FAIL");
else if (ln == prog->offs[n].end)
Perl_sv_catpvf(aTHX_ sv, ": ACCEPT - EMPTY STRING");
else {
const char *s = reginfo->strbeg + ln;
Perl_sv_catpvf(aTHX_ sv, ": ");
Perl_pv_pretty( aTHX_ sv, s, prog->offs[n].end - prog->offs[n].start, 32, 0, 0,
PERL_PV_ESCAPE_UNI_DETECT|PERL_PV_PRETTY_NOCLEAR|PERL_PV_PRETTY_ELLIPSES|PERL_PV_PRETTY_QUOTE );
}
}
} else if (k == GOSUB) {
AV *name_list= NULL;
if ( RXp_PAREN_NAMES(prog) ) {
name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]);
} else if ( pRExC_state ) {
name_list= RExC_paren_name_list;
}
/* Paren and offset */
Perl_sv_catpvf(aTHX_ sv, "%d[%+d:%d]", (int)ARG(o),(int)ARG2L(o),
(int)((o + (int)ARG2L(o)) - progi->program) );
if (name_list) {
SV **name= av_fetch(name_list, ARG(o), 0 );
if (name)
Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name));
}
}
else if (k == LOGICAL)
/* 2: embedded, otherwise 1 */
Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags);
else if (k == ANYOF) {
const U8 flags = (OP(o) == ANYOFH) ? 0 : ANYOF_FLAGS(o);
bool do_sep = FALSE; /* Do we need to separate various components of
the output? */
/* Set if there is still an unresolved user-defined property */
SV *unresolved = NULL;
/* Things that are ignored except when the runtime locale is UTF-8 */
SV *only_utf8_locale_invlist = NULL;
/* Code points that don't fit in the bitmap */
SV *nonbitmap_invlist = NULL;
/* And things that aren't in the bitmap, but are small enough to be */
SV* bitmap_range_not_in_bitmap = NULL;
const bool inverted = flags & ANYOF_INVERT;
if (OP(o) == ANYOFL || OP(o) == ANYOFPOSIXL) {
if (ANYOFL_UTF8_LOCALE_REQD(flags)) {
sv_catpvs(sv, "{utf8-locale-reqd}");
}
if (flags & ANYOFL_FOLD) {
sv_catpvs(sv, "{i}");
}
}
/* If there is stuff outside the bitmap, get it */
if (ARG(o) != ANYOF_ONLY_HAS_BITMAP) {
(void) _get_regclass_nonbitmap_data(prog, o, FALSE,
&unresolved,
&only_utf8_locale_invlist,
&nonbitmap_invlist);
/* The non-bitmap data may contain stuff that could fit in the
* bitmap. This could come from a user-defined property being
* finally resolved when this call was done; or much more likely
* because there are matches that require UTF-8 to be valid, and so
* aren't in the bitmap. This is teased apart later */
_invlist_intersection(nonbitmap_invlist,
PL_InBitmap,
&bitmap_range_not_in_bitmap);
/* Leave just the things that don't fit into the bitmap */
_invlist_subtract(nonbitmap_invlist,
PL_InBitmap,
&nonbitmap_invlist);
}
/* Obey this flag to add all above-the-bitmap code points */
if (flags & ANYOF_MATCHES_ALL_ABOVE_BITMAP) {
nonbitmap_invlist = _add_range_to_invlist(nonbitmap_invlist,
NUM_ANYOF_CODE_POINTS,
UV_MAX);
}
/* Ready to start outputting. First, the initial left bracket */
Perl_sv_catpvf(aTHX_ sv, "[%s", PL_colors[0]);
if (OP(o) != ANYOFH) {
/* Then all the things that could fit in the bitmap */
do_sep = put_charclass_bitmap_innards(sv,
ANYOF_BITMAP(o),
bitmap_range_not_in_bitmap,
only_utf8_locale_invlist,
o,
/* Can't try inverting for a
* better display if there
* are things that haven't
* been resolved */
unresolved != NULL);
SvREFCNT_dec(bitmap_range_not_in_bitmap);
/* If there are user-defined properties which haven't been defined
* yet, output them. If the result is not to be inverted, it is
* clearest to output them in a separate [] from the bitmap range
* stuff. If the result is to be complemented, we have to show
* everything in one [], as the inversion applies to the whole
* thing. Use {braces} to separate them from anything in the
* bitmap and anything above the bitmap. */
if (unresolved) {
if (inverted) {
if (! do_sep) { /* If didn't output anything in the bitmap
*/
sv_catpvs(sv, "^");
}
sv_catpvs(sv, "{");
}
else if (do_sep) {
Perl_sv_catpvf(aTHX_ sv,"%s][%s", PL_colors[1],
PL_colors[0]);
}
sv_catsv(sv, unresolved);
if (inverted) {
sv_catpvs(sv, "}");
}
do_sep = ! inverted;
}
}
/* And, finally, add the above-the-bitmap stuff */
if (nonbitmap_invlist && _invlist_len(nonbitmap_invlist)) {
SV* contents;
/* See if truncation size is overridden */
const STRLEN dump_len = (PL_dump_re_max_len > 256)
? PL_dump_re_max_len
: 256;
/* This is output in a separate [] */
if (do_sep) {
Perl_sv_catpvf(aTHX_ sv,"%s][%s", PL_colors[1], PL_colors[0]);
}
/* And, for easy of understanding, it is shown in the
* uncomplemented form if possible. The one exception being if
* there are unresolved items, where the inversion has to be
* delayed until runtime */
if (inverted && ! unresolved) {
_invlist_invert(nonbitmap_invlist);
_invlist_subtract(nonbitmap_invlist, PL_InBitmap, &nonbitmap_invlist);
}
contents = invlist_contents(nonbitmap_invlist,
FALSE /* output suitable for catsv */
);
/* If the output is shorter than the permissible maximum, just do it. */
if (SvCUR(contents) <= dump_len) {
sv_catsv(sv, contents);
}
else {
const char * contents_string = SvPVX(contents);
STRLEN i = dump_len;
/* Otherwise, start at the permissible max and work back to the
* first break possibility */
while (i > 0 && contents_string[i] != ' ') {
i--;
}
if (i == 0) { /* Fail-safe. Use the max if we couldn't
find a legal break */
i = dump_len;
}
sv_catpvn(sv, contents_string, i);
sv_catpvs(sv, "...");
}
SvREFCNT_dec_NN(contents);
SvREFCNT_dec_NN(nonbitmap_invlist);
}
/* And finally the matching, closing ']' */
Perl_sv_catpvf(aTHX_ sv, "%s]", PL_colors[1]);
if (OP(o) == ANYOFH && FLAGS(o) != 0) {
Perl_sv_catpvf(aTHX_ sv, " (First UTF-8 byte=\\x%02x)", FLAGS(o));
}
SvREFCNT_dec(unresolved);
}
else if (k == ANYOFM) {
SV * cp_list = get_ANYOFM_contents(o);
Perl_sv_catpvf(aTHX_ sv, "[%s", PL_colors[0]);
if (OP(o) == NANYOFM) {
_invlist_invert(cp_list);
}
put_charclass_bitmap_innards(sv, NULL, cp_list, NULL, NULL, TRUE);
Perl_sv_catpvf(aTHX_ sv, "%s]", PL_colors[1]);
SvREFCNT_dec(cp_list);
}
else if (k == POSIXD || k == NPOSIXD) {
U8 index = FLAGS(o) * 2;
if (index < C_ARRAY_LENGTH(anyofs)) {
if (*anyofs[index] != '[') {
sv_catpvs(sv, "[");
}
sv_catpv(sv, anyofs[index]);
if (*anyofs[index] != '[') {
sv_catpvs(sv, "]");
}
}
else {
Perl_sv_catpvf(aTHX_ sv, "[illegal type=%d])", index);
}
}
else if (k == BOUND || k == NBOUND) {
/* Must be synced with order of 'bound_type' in regcomp.h */
const char * const bounds[] = {
"", /* Traditional */
"{gcb}",
"{lb}",
"{sb}",
"{wb}"
};
assert(FLAGS(o) < C_ARRAY_LENGTH(bounds));
sv_catpv(sv, bounds[FLAGS(o)]);
}
else if (k == BRANCHJ && (OP(o) == UNLESSM || OP(o) == IFMATCH)) {
Perl_sv_catpvf(aTHX_ sv, "[%d", -(o->flags));
if (o->next_off) {
Perl_sv_catpvf(aTHX_ sv, "..-%d", o->flags - o->next_off);
}
Perl_sv_catpvf(aTHX_ sv, "]");
}
else if (OP(o) == SBOL)
Perl_sv_catpvf(aTHX_ sv, " /%s/", o->flags ? "\\A" : "^");
/* add on the verb argument if there is one */
if ( ( k == VERB || OP(o) == ACCEPT || OP(o) == OPFAIL ) && o->flags) {
if ( ARG(o) )
Perl_sv_catpvf(aTHX_ sv, ":%" SVf,
SVfARG((MUTABLE_SV(progi->data->data[ ARG( o ) ]))));
else
sv_catpvs(sv, ":NULL");
}
#else
PERL_UNUSED_CONTEXT;
PERL_UNUSED_ARG(sv);
PERL_UNUSED_ARG(o);
PERL_UNUSED_ARG(prog);
PERL_UNUSED_ARG(reginfo);
PERL_UNUSED_ARG(pRExC_state);
#endif /* DEBUGGING */
}
SV *
Perl_re_intuit_string(pTHX_ REGEXP * const r)
{ /* Assume that RE_INTUIT is set */
struct regexp *const prog = ReANY(r);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_RE_INTUIT_STRING;
PERL_UNUSED_CONTEXT;
DEBUG_COMPILE_r(
{
const char * const s = SvPV_nolen_const(RX_UTF8(r)
? prog->check_utf8 : prog->check_substr);
if (!PL_colorset) reginitcolors();
Perl_re_printf( aTHX_
"%sUsing REx %ssubstr:%s \"%s%.60s%s%s\"\n",
PL_colors[4],
RX_UTF8(r) ? "utf8 " : "",
PL_colors[5], PL_colors[0],
s,
PL_colors[1],
(strlen(s) > PL_dump_re_max_len ? "..." : ""));
} );
/* use UTF8 check substring if regexp pattern itself is in UTF8 */
return RX_UTF8(r) ? prog->check_utf8 : prog->check_substr;
}
/*
pregfree()
handles refcounting and freeing the perl core regexp structure. When
it is necessary to actually free the structure the first thing it
does is call the 'free' method of the regexp_engine associated to
the regexp, allowing the handling of the void *pprivate; member
first. (This routine is not overridable by extensions, which is why
the extensions free is called first.)
See regdupe and regdupe_internal if you change anything here.
*/
#ifndef PERL_IN_XSUB_RE
void
Perl_pregfree(pTHX_ REGEXP *r)
{
SvREFCNT_dec(r);
}
void
Perl_pregfree2(pTHX_ REGEXP *rx)
{
struct regexp *const r = ReANY(rx);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_PREGFREE2;
if (! r)
return;
if (r->mother_re) {
ReREFCNT_dec(r->mother_re);
} else {
CALLREGFREE_PVT(rx); /* free the private data */
SvREFCNT_dec(RXp_PAREN_NAMES(r));
}
if (r->substrs) {
int i;
for (i = 0; i < 2; i++) {
SvREFCNT_dec(r->substrs->data[i].substr);
SvREFCNT_dec(r->substrs->data[i].utf8_substr);
}
Safefree(r->substrs);
}
RX_MATCH_COPY_FREE(rx);
#ifdef PERL_ANY_COW
SvREFCNT_dec(r->saved_copy);
#endif
Safefree(r->offs);
SvREFCNT_dec(r->qr_anoncv);
if (r->recurse_locinput)
Safefree(r->recurse_locinput);
}
/* reg_temp_copy()
Copy ssv to dsv, both of which should of type SVt_REGEXP or SVt_PVLV,
except that dsv will be created if NULL.
This function is used in two main ways. First to implement
$r = qr/....; $s = $$r;
Secondly, it is used as a hacky workaround to the structural issue of
match results
being stored in the regexp structure which is in turn stored in
PL_curpm/PL_reg_curpm. The problem is that due to qr// the pattern
could be PL_curpm in multiple contexts, and could require multiple
result sets being associated with the pattern simultaneously, such
as when doing a recursive match with (??{$qr})
The solution is to make a lightweight copy of the regexp structure
when a qr// is returned from the code executed by (??{$qr}) this
lightweight copy doesn't actually own any of its data except for
the starp/end and the actual regexp structure itself.
*/
REGEXP *
Perl_reg_temp_copy(pTHX_ REGEXP *dsv, REGEXP *ssv)
{
struct regexp *drx;
struct regexp *const srx = ReANY(ssv);
const bool islv = dsv && SvTYPE(dsv) == SVt_PVLV;
PERL_ARGS_ASSERT_REG_TEMP_COPY;
if (!dsv)
dsv = (REGEXP*) newSV_type(SVt_REGEXP);
else {
assert(SvTYPE(dsv) == SVt_REGEXP || (SvTYPE(dsv) == SVt_PVLV));
/* our only valid caller, sv_setsv_flags(), should have done
* a SV_CHECK_THINKFIRST_COW_DROP() by now */
assert(!SvOOK(dsv));
assert(!SvIsCOW(dsv));
assert(!SvROK(dsv));
if (SvPVX_const(dsv)) {
if (SvLEN(dsv))
Safefree(SvPVX(dsv));
SvPVX(dsv) = NULL;
}
SvLEN_set(dsv, 0);
SvCUR_set(dsv, 0);
SvOK_off((SV *)dsv);
if (islv) {
/* For PVLVs, the head (sv_any) points to an XPVLV, while
* the LV's xpvlenu_rx will point to a regexp body, which
* we allocate here */
REGEXP *temp = (REGEXP *)newSV_type(SVt_REGEXP);
assert(!SvPVX(dsv));
((XPV*)SvANY(dsv))->xpv_len_u.xpvlenu_rx = temp->sv_any;
temp->sv_any = NULL;
SvFLAGS(temp) = (SvFLAGS(temp) & ~SVTYPEMASK) | SVt_NULL;
SvREFCNT_dec_NN(temp);
/* SvCUR still resides in the xpvlv struct, so the regexp copy-
ing below will not set it. */
SvCUR_set(dsv, SvCUR(ssv));
}
}
/* This ensures that SvTHINKFIRST(sv) is true, and hence that
sv_force_normal(sv) is called. */
SvFAKE_on(dsv);
drx = ReANY(dsv);
SvFLAGS(dsv) |= SvFLAGS(ssv) & (SVf_POK|SVp_POK|SVf_UTF8);
SvPV_set(dsv, RX_WRAPPED(ssv));
/* We share the same string buffer as the original regexp, on which we
hold a reference count, incremented when mother_re is set below.
The string pointer is copied here, being part of the regexp struct.
*/
memcpy(&(drx->xpv_cur), &(srx->xpv_cur),
sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur));
if (!islv)
SvLEN_set(dsv, 0);
if (srx->offs) {
const I32 npar = srx->nparens+1;
Newx(drx->offs, npar, regexp_paren_pair);
Copy(srx->offs, drx->offs, npar, regexp_paren_pair);
}
if (srx->substrs) {
int i;
Newx(drx->substrs, 1, struct reg_substr_data);
StructCopy(srx->substrs, drx->substrs, struct reg_substr_data);
for (i = 0; i < 2; i++) {
SvREFCNT_inc_void(drx->substrs->data[i].substr);
SvREFCNT_inc_void(drx->substrs->data[i].utf8_substr);
}
/* check_substr and check_utf8, if non-NULL, point to either their
anchored or float namesakes, and don't hold a second reference. */
}
RX_MATCH_COPIED_off(dsv);
#ifdef PERL_ANY_COW
drx->saved_copy = NULL;
#endif
drx->mother_re = ReREFCNT_inc(srx->mother_re ? srx->mother_re : ssv);
SvREFCNT_inc_void(drx->qr_anoncv);
if (srx->recurse_locinput)
Newx(drx->recurse_locinput, srx->nparens + 1, char *);
return dsv;
}
#endif
/* regfree_internal()
Free the private data in a regexp. This is overloadable by
extensions. Perl takes care of the regexp structure in pregfree(),
this covers the *pprivate pointer which technically perl doesn't
know about, however of course we have to handle the
regexp_internal structure when no extension is in use.
Note this is called before freeing anything in the regexp
structure.
*/
void
Perl_regfree_internal(pTHX_ REGEXP * const rx)
{
struct regexp *const r = ReANY(rx);
RXi_GET_DECL(r, ri);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGFREE_INTERNAL;
if (! ri) {
return;
}
DEBUG_COMPILE_r({
if (!PL_colorset)
reginitcolors();
{
SV *dsv= sv_newmortal();
RE_PV_QUOTED_DECL(s, RX_UTF8(rx),
dsv, RX_PRECOMP(rx), RX_PRELEN(rx), PL_dump_re_max_len);
Perl_re_printf( aTHX_ "%sFreeing REx:%s %s\n",
PL_colors[4], PL_colors[5], s);
}
});
#ifdef RE_TRACK_PATTERN_OFFSETS
if (ri->u.offsets)
Safefree(ri->u.offsets); /* 20010421 MJD */
#endif
if (ri->code_blocks)
S_free_codeblocks(aTHX_ ri->code_blocks);
if (ri->data) {
int n = ri->data->count;
while (--n >= 0) {
/* If you add a ->what type here, update the comment in regcomp.h */
switch (ri->data->what[n]) {
case 'a':
case 'r':
case 's':
case 'S':
case 'u':
SvREFCNT_dec(MUTABLE_SV(ri->data->data[n]));
break;
case 'f':
Safefree(ri->data->data[n]);
break;
case 'l':
case 'L':
break;
case 'T':
{ /* Aho Corasick add-on structure for a trie node.
Used in stclass optimization only */
U32 refcount;
reg_ac_data *aho=(reg_ac_data*)ri->data->data[n];
#ifdef USE_ITHREADS
dVAR;
#endif
OP_REFCNT_LOCK;
refcount = --aho->refcount;
OP_REFCNT_UNLOCK;
if ( !refcount ) {
PerlMemShared_free(aho->states);
PerlMemShared_free(aho->fail);
/* do this last!!!! */
PerlMemShared_free(ri->data->data[n]);
/* we should only ever get called once, so
* assert as much, and also guard the free
* which /might/ happen twice. At the least
* it will make code anlyzers happy and it
* doesn't cost much. - Yves */
assert(ri->regstclass);
if (ri->regstclass) {
PerlMemShared_free(ri->regstclass);
ri->regstclass = 0;
}
}
}
break;
case 't':
{
/* trie structure. */
U32 refcount;
reg_trie_data *trie=(reg_trie_data*)ri->data->data[n];
#ifdef USE_ITHREADS
dVAR;
#endif
OP_REFCNT_LOCK;
refcount = --trie->refcount;
OP_REFCNT_UNLOCK;
if ( !refcount ) {
PerlMemShared_free(trie->charmap);
PerlMemShared_free(trie->states);
PerlMemShared_free(trie->trans);
if (trie->bitmap)
PerlMemShared_free(trie->bitmap);
if (trie->jump)
PerlMemShared_free(trie->jump);
PerlMemShared_free(trie->wordinfo);
/* do this last!!!! */
PerlMemShared_free(ri->data->data[n]);
}
}
break;
default:
Perl_croak(aTHX_ "panic: regfree data code '%c'",
ri->data->what[n]);
}
}
Safefree(ri->data->what);
Safefree(ri->data);
}
Safefree(ri);
}
#define av_dup_inc(s, t) MUTABLE_AV(sv_dup_inc((const SV *)s, t))
#define hv_dup_inc(s, t) MUTABLE_HV(sv_dup_inc((const SV *)s, t))
#define SAVEPVN(p, n) ((p) ? savepvn(p, n) : NULL)
/*
re_dup_guts - duplicate a regexp.
This routine is expected to clone a given regexp structure. It is only
compiled under USE_ITHREADS.
After all of the core data stored in struct regexp is duplicated
the regexp_engine.dupe method is used to copy any private data
stored in the *pprivate pointer. This allows extensions to handle
any duplication it needs to do.
See pregfree() and regfree_internal() if you change anything here.
*/
#if defined(USE_ITHREADS)
#ifndef PERL_IN_XSUB_RE
void
Perl_re_dup_guts(pTHX_ const REGEXP *sstr, REGEXP *dstr, CLONE_PARAMS *param)
{
dVAR;
I32 npar;
const struct regexp *r = ReANY(sstr);
struct regexp *ret = ReANY(dstr);
PERL_ARGS_ASSERT_RE_DUP_GUTS;
npar = r->nparens+1;
Newx(ret->offs, npar, regexp_paren_pair);
Copy(r->offs, ret->offs, npar, regexp_paren_pair);
if (ret->substrs) {
/* Do it this way to avoid reading from *r after the StructCopy().
That way, if any of the sv_dup_inc()s dislodge *r from the L1
cache, it doesn't matter. */
int i;
const bool anchored = r->check_substr
? r->check_substr == r->substrs->data[0].substr
: r->check_utf8 == r->substrs->data[0].utf8_substr;
Newx(ret->substrs, 1, struct reg_substr_data);
StructCopy(r->substrs, ret->substrs, struct reg_substr_data);
for (i = 0; i < 2; i++) {
ret->substrs->data[i].substr =
sv_dup_inc(ret->substrs->data[i].substr, param);
ret->substrs->data[i].utf8_substr =
sv_dup_inc(ret->substrs->data[i].utf8_substr, param);
}
/* check_substr and check_utf8, if non-NULL, point to either their
anchored or float namesakes, and don't hold a second reference. */
if (ret->check_substr) {
if (anchored) {
assert(r->check_utf8 == r->substrs->data[0].utf8_substr);
ret->check_substr = ret->substrs->data[0].substr;
ret->check_utf8 = ret->substrs->data[0].utf8_substr;
} else {
assert(r->check_substr == r->substrs->data[1].substr);
assert(r->check_utf8 == r->substrs->data[1].utf8_substr);
ret->check_substr = ret->substrs->data[1].substr;
ret->check_utf8 = ret->substrs->data[1].utf8_substr;
}
} else if (ret->check_utf8) {
if (anchored) {
ret->check_utf8 = ret->substrs->data[0].utf8_substr;
} else {
ret->check_utf8 = ret->substrs->data[1].utf8_substr;
}
}
}
RXp_PAREN_NAMES(ret) = hv_dup_inc(RXp_PAREN_NAMES(ret), param);
ret->qr_anoncv = MUTABLE_CV(sv_dup_inc((const SV *)ret->qr_anoncv, param));
if (r->recurse_locinput)
Newx(ret->recurse_locinput, r->nparens + 1, char *);
if (ret->pprivate)
RXi_SET(ret, CALLREGDUPE_PVT(dstr, param));
if (RX_MATCH_COPIED(dstr))
ret->subbeg = SAVEPVN(ret->subbeg, ret->sublen);
else
ret->subbeg = NULL;
#ifdef PERL_ANY_COW
ret->saved_copy = NULL;
#endif
/* Whether mother_re be set or no, we need to copy the string. We
cannot refrain from copying it when the storage points directly to
our mother regexp, because that's
1: a buffer in a different thread
2: something we no longer hold a reference on
so we need to copy it locally. */
RX_WRAPPED(dstr) = SAVEPVN(RX_WRAPPED_const(sstr), SvCUR(sstr)+1);
/* set malloced length to a non-zero value so it will be freed
* (otherwise in combination with SVf_FAKE it looks like an alien
* buffer). It doesn't have to be the actual malloced size, since it
* should never be grown */
SvLEN_set(dstr, SvCUR(sstr)+1);
ret->mother_re = NULL;
}
#endif /* PERL_IN_XSUB_RE */
/*
regdupe_internal()
This is the internal complement to regdupe() which is used to copy
the structure pointed to by the *pprivate pointer in the regexp.
This is the core version of the extension overridable cloning hook.
The regexp structure being duplicated will be copied by perl prior
to this and will be provided as the regexp *r argument, however
with the /old/ structures pprivate pointer value. Thus this routine
may override any copying normally done by perl.
It returns a pointer to the new regexp_internal structure.
*/
void *
Perl_regdupe_internal(pTHX_ REGEXP * const rx, CLONE_PARAMS *param)
{
dVAR;
struct regexp *const r = ReANY(rx);
regexp_internal *reti;
int len;
RXi_GET_DECL(r, ri);
PERL_ARGS_ASSERT_REGDUPE_INTERNAL;
len = ProgLen(ri);
Newxc(reti, sizeof(regexp_internal) + len*sizeof(regnode),
char, regexp_internal);
Copy(ri->program, reti->program, len+1, regnode);
if (ri->code_blocks) {
int n;
Newx(reti->code_blocks, 1, struct reg_code_blocks);
Newx(reti->code_blocks->cb, ri->code_blocks->count,
struct reg_code_block);
Copy(ri->code_blocks->cb, reti->code_blocks->cb,
ri->code_blocks->count, struct reg_code_block);
for (n = 0; n < ri->code_blocks->count; n++)
reti->code_blocks->cb[n].src_regex = (REGEXP*)
sv_dup_inc((SV*)(ri->code_blocks->cb[n].src_regex), param);
reti->code_blocks->count = ri->code_blocks->count;
reti->code_blocks->refcnt = 1;
}
else
reti->code_blocks = NULL;
reti->regstclass = NULL;
if (ri->data) {
struct reg_data *d;
const int count = ri->data->count;
int i;
Newxc(d, sizeof(struct reg_data) + count*sizeof(void *),
char, struct reg_data);
Newx(d->what, count, U8);
d->count = count;
for (i = 0; i < count; i++) {
d->what[i] = ri->data->what[i];
switch (d->what[i]) {
/* see also regcomp.h and regfree_internal() */
case 'a': /* actually an AV, but the dup function is identical.
values seem to be "plain sv's" generally. */
case 'r': /* a compiled regex (but still just another SV) */
case 's': /* an RV (currently only used for an RV to an AV by the ANYOF code)
this use case should go away, the code could have used
'a' instead - see S_set_ANYOF_arg() for array contents. */
case 'S': /* actually an SV, but the dup function is identical. */
case 'u': /* actually an HV, but the dup function is identical.
values are "plain sv's" */
d->data[i] = sv_dup_inc((const SV *)ri->data->data[i], param);
break;
case 'f':
/* Synthetic Start Class - "Fake" charclass we generate to optimize
* patterns which could start with several different things. Pre-TRIE
* this was more important than it is now, however this still helps
* in some places, for instance /x?a+/ might produce a SSC equivalent
* to [xa]. This is used by Perl_re_intuit_start() and S_find_byclass()
* in regexec.c
*/
/* This is cheating. */
Newx(d->data[i], 1, regnode_ssc);
StructCopy(ri->data->data[i], d->data[i], regnode_ssc);
reti->regstclass = (regnode*)d->data[i];
break;
case 'T':
/* AHO-CORASICK fail table */
/* Trie stclasses are readonly and can thus be shared
* without duplication. We free the stclass in pregfree
* when the corresponding reg_ac_data struct is freed.
*/
reti->regstclass= ri->regstclass;
/* FALLTHROUGH */
case 't':
/* TRIE transition table */
OP_REFCNT_LOCK;
((reg_trie_data*)ri->data->data[i])->refcount++;
OP_REFCNT_UNLOCK;
/* FALLTHROUGH */
case 'l': /* (?{...}) or (??{ ... }) code (cb->block) */
case 'L': /* same when RExC_pm_flags & PMf_HAS_CV and code
is not from another regexp */
d->data[i] = ri->data->data[i];
break;
default:
Perl_croak(aTHX_ "panic: re_dup_guts unknown data code '%c'",
ri->data->what[i]);
}
}
reti->data = d;
}
else
reti->data = NULL;
reti->name_list_idx = ri->name_list_idx;
#ifdef RE_TRACK_PATTERN_OFFSETS
if (ri->u.offsets) {
Newx(reti->u.offsets, 2*len+1, U32);
Copy(ri->u.offsets, reti->u.offsets, 2*len+1, U32);
}
#else
SetProgLen(reti, len);
#endif
return (void*)reti;
}
#endif /* USE_ITHREADS */
#ifndef PERL_IN_XSUB_RE
/*
- regnext - dig the "next" pointer out of a node
*/
regnode *
Perl_regnext(pTHX_ regnode *p)
{
I32 offset;
if (!p)
return(NULL);
if (OP(p) > REGNODE_MAX) { /* regnode.type is unsigned */
Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d",
(int)OP(p), (int)REGNODE_MAX);
}
offset = (reg_off_by_arg[OP(p)] ? ARG(p) : NEXT_OFF(p));
if (offset == 0)
return(NULL);
return(p+offset);
}
#endif
STATIC void
S_re_croak2(pTHX_ bool utf8, const char* pat1, const char* pat2,...)
{
va_list args;
STRLEN l1 = strlen(pat1);
STRLEN l2 = strlen(pat2);
char buf[512];
SV *msv;
const char *message;
PERL_ARGS_ASSERT_RE_CROAK2;
if (l1 > 510)
l1 = 510;
if (l1 + l2 > 510)
l2 = 510 - l1;
Copy(pat1, buf, l1 , char);
Copy(pat2, buf + l1, l2 , char);
buf[l1 + l2] = '\n';
buf[l1 + l2 + 1] = '\0';
va_start(args, pat2);
msv = vmess(buf, &args);
va_end(args);
message = SvPV_const(msv, l1);
if (l1 > 512)
l1 = 512;
Copy(message, buf, l1 , char);
/* l1-1 to avoid \n */
Perl_croak(aTHX_ "%" UTF8f, UTF8fARG(utf8, l1-1, buf));
}
/* XXX Here's a total kludge. But we need to re-enter for swash routines. */
#ifndef PERL_IN_XSUB_RE
void
Perl_save_re_context(pTHX)
{
I32 nparens = -1;
I32 i;
/* Save $1..$n (#18107: UTF-8 s/(\w+)/uc($1)/e); AMS 20021106. */
if (PL_curpm) {
const REGEXP * const rx = PM_GETRE(PL_curpm);
if (rx)
nparens = RX_NPARENS(rx);
}
/* RT #124109. This is a complete hack; in the SWASHNEW case we know
* that PL_curpm will be null, but that utf8.pm and the modules it
* loads will only use $1..$3.
* The t/porting/re_context.t test file checks this assumption.
*/
if (nparens == -1)
nparens = 3;
for (i = 1; i <= nparens; i++) {
char digits[TYPE_CHARS(long)];
const STRLEN len = my_snprintf(digits, sizeof(digits),
"%lu", (long)i);
GV *const *const gvp
= (GV**)hv_fetch(PL_defstash, digits, len, 0);
if (gvp) {
GV * const gv = *gvp;
if (SvTYPE(gv) == SVt_PVGV && GvSV(gv))
save_scalar(gv);
}
}
}
#endif
#ifdef DEBUGGING
STATIC void
S_put_code_point(pTHX_ SV *sv, UV c)
{
PERL_ARGS_ASSERT_PUT_CODE_POINT;
if (c > 255) {
Perl_sv_catpvf(aTHX_ sv, "\\x{%04" UVXf "}", c);
}
else if (isPRINT(c)) {
const char string = (char) c;
/* We use {phrase} as metanotation in the class, so also escape literal
* braces */
if (isBACKSLASHED_PUNCT(c) || c == '{' || c == '}')
sv_catpvs(sv, "\\");
sv_catpvn(sv, &string, 1);
}
else if (isMNEMONIC_CNTRL(c)) {
Perl_sv_catpvf(aTHX_ sv, "%s", cntrl_to_mnemonic((U8) c));
}
else {
Perl_sv_catpvf(aTHX_ sv, "\\x%02X", (U8) c);
}
}
#define MAX_PRINT_A MAX_PRINT_A_FOR_USE_ONLY_BY_REGCOMP_DOT_C
STATIC void
S_put_range(pTHX_ SV *sv, UV start, const UV end, const bool allow_literals)
{
/* Appends to 'sv' a displayable version of the range of code points from
* 'start' to 'end'. Mnemonics (like '\r') are used for the few controls
* that have them, when they occur at the beginning or end of the range.
* It uses hex to output the remaining code points, unless 'allow_literals'
* is true, in which case the printable ASCII ones are output as-is (though
* some of these will be escaped by put_code_point()).
*
* NOTE: This is designed only for printing ranges of code points that fit
* inside an ANYOF bitmap. Higher code points are simply suppressed
*/
const unsigned int min_range_count = 3;
assert(start <= end);
PERL_ARGS_ASSERT_PUT_RANGE;
while (start <= end) {
UV this_end;
const char * format;
if (end - start < min_range_count) {
/* Output chars individually when they occur in short ranges */
for (; start <= end; start++) {
put_code_point(sv, start);
}
break;
}
/* If permitted by the input options, and there is a possibility that
* this range contains a printable literal, look to see if there is
* one. */
if (allow_literals && start <= MAX_PRINT_A) {
/* If the character at the beginning of the range isn't an ASCII
* printable, effectively split the range into two parts:
* 1) the portion before the first such printable,
* 2) the rest
* and output them separately. */
if (! isPRINT_A(start)) {
UV temp_end = start + 1;
/* There is no point looking beyond the final possible
* printable, in MAX_PRINT_A */
UV max = MIN(end, MAX_PRINT_A);
while (temp_end <= max && ! isPRINT_A(temp_end)) {
temp_end++;
}
/* Here, temp_end points to one beyond the first printable if
* found, or to one beyond 'max' if not. If none found, make
* sure that we use the entire range */
if (temp_end > MAX_PRINT_A) {
temp_end = end + 1;
}
/* Output the first part of the split range: the part that
* doesn't have printables, with the parameter set to not look
* for literals (otherwise we would infinitely recurse) */
put_range(sv, start, temp_end - 1, FALSE);
/* The 2nd part of the range (if any) starts here. */
start = temp_end;
/* We do a continue, instead of dropping down, because even if
* the 2nd part is non-empty, it could be so short that we want
* to output it as individual characters, as tested for at the
* top of this loop. */
continue;
}
/* Here, 'start' is a printable ASCII. If it is an alphanumeric,
* output a sub-range of just the digits or letters, then process
* the remaining portion as usual. */
if (isALPHANUMERIC_A(start)) {
UV mask = (isDIGIT_A(start))
? _CC_DIGIT
: isUPPER_A(start)
? _CC_UPPER
: _CC_LOWER;
UV temp_end = start + 1;
/* Find the end of the sub-range that includes just the
* characters in the same class as the first character in it */
while (temp_end <= end && _generic_isCC_A(temp_end, mask)) {
temp_end++;
}
temp_end--;
/* For short ranges, don't duplicate the code above to output
* them; just call recursively */
if (temp_end - start < min_range_count) {
put_range(sv, start, temp_end, FALSE);
}
else { /* Output as a range */
put_code_point(sv, start);
sv_catpvs(sv, "-");
put_code_point(sv, temp_end);
}
start = temp_end + 1;
continue;
}
/* We output any other printables as individual characters */
if (isPUNCT_A(start) || isSPACE_A(start)) {
while (start <= end && (isPUNCT_A(start)
|| isSPACE_A(start)))
{
put_code_point(sv, start);
start++;
}
continue;
}
} /* End of looking for literals */
/* Here is not to output as a literal. Some control characters have
* mnemonic names. Split off any of those at the beginning and end of
* the range to print mnemonically. It isn't possible for many of
* these to be in a row, so this won't overwhelm with output */
if ( start <= end
&& (isMNEMONIC_CNTRL(start) || isMNEMONIC_CNTRL(end)))
{
while (isMNEMONIC_CNTRL(start) && start <= end) {
put_code_point(sv, start);
start++;
}
/* If this didn't take care of the whole range ... */
if (start <= end) {
/* Look backwards from the end to find the final non-mnemonic
* */
UV temp_end = end;
while (isMNEMONIC_CNTRL(temp_end)) {
temp_end--;
}
/* And separately output the interior range that doesn't start
* or end with mnemonics */
put_range(sv, start, temp_end, FALSE);
/* Then output the mnemonic trailing controls */
start = temp_end + 1;
while (start <= end) {
put_code_point(sv, start);
start++;
}
break;
}
}
/* As a final resort, output the range or subrange as hex. */
this_end = (end < NUM_ANYOF_CODE_POINTS)
? end
: NUM_ANYOF_CODE_POINTS - 1;
#if NUM_ANYOF_CODE_POINTS > 256
format = (this_end < 256)
? "\\x%02" UVXf "-\\x%02" UVXf
: "\\x{%04" UVXf "}-\\x{%04" UVXf "}";
#else
format = "\\x%02" UVXf "-\\x%02" UVXf;
#endif
GCC_DIAG_IGNORE_STMT(-Wformat-nonliteral);
Perl_sv_catpvf(aTHX_ sv, format, start, this_end);
GCC_DIAG_RESTORE_STMT;
break;
}
}
STATIC void
S_put_charclass_bitmap_innards_invlist(pTHX_ SV *sv, SV* invlist)
{
/* Concatenate onto the PV in 'sv' a displayable form of the inversion list
* 'invlist' */
UV start, end;
bool allow_literals = TRUE;
PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_INVLIST;
/* Generally, it is more readable if printable characters are output as
* literals, but if a range (nearly) spans all of them, it's best to output
* it as a single range. This code will use a single range if all but 2
* ASCII printables are in it */
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
/* If the range starts beyond the final printable, it doesn't have any
* in it */
if (start > MAX_PRINT_A) {
break;
}
/* In both ASCII and EBCDIC, a SPACE is the lowest printable. To span
* all but two, the range must start and end no later than 2 from
* either end */
if (start < ' ' + 2 && end > MAX_PRINT_A - 2) {
if (end > MAX_PRINT_A) {
end = MAX_PRINT_A;
}
if (start < ' ') {
start = ' ';
}
if (end - start >= MAX_PRINT_A - ' ' - 2) {
allow_literals = FALSE;
}
break;
}
}
invlist_iterfinish(invlist);
/* Here we have figured things out. Output each range */
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
if (start >= NUM_ANYOF_CODE_POINTS) {
break;
}
put_range(sv, start, end, allow_literals);
}
invlist_iterfinish(invlist);
return;
}
STATIC SV*
S_put_charclass_bitmap_innards_common(pTHX_
SV* invlist, /* The bitmap */
SV* posixes, /* Under /l, things like [:word:], \S */
SV* only_utf8, /* Under /d, matches iff the target is UTF-8 */
SV* not_utf8, /* /d, matches iff the target isn't UTF-8 */
SV* only_utf8_locale, /* Under /l, matches if the locale is UTF-8 */
const bool invert /* Is the result to be inverted? */
)
{
/* Create and return an SV containing a displayable version of the bitmap
* and associated information determined by the input parameters. If the
* output would have been only the inversion indicator '^', NULL is instead
* returned. */
dVAR;
SV * output;
PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_COMMON;
if (invert) {
output = newSVpvs("^");
}
else {
output = newSVpvs("");
}
/* First, the code points in the bitmap that are unconditionally there */
put_charclass_bitmap_innards_invlist(output, invlist);
/* Traditionally, these have been placed after the main code points */
if (posixes) {
sv_catsv(output, posixes);
}
if (only_utf8 && _invlist_len(only_utf8)) {
Perl_sv_catpvf(aTHX_ output, "%s{utf8}%s", PL_colors[1], PL_colors[0]);
put_charclass_bitmap_innards_invlist(output, only_utf8);
}
if (not_utf8 && _invlist_len(not_utf8)) {
Perl_sv_catpvf(aTHX_ output, "%s{not utf8}%s", PL_colors[1], PL_colors[0]);
put_charclass_bitmap_innards_invlist(output, not_utf8);
}
if (only_utf8_locale && _invlist_len(only_utf8_locale)) {
Perl_sv_catpvf(aTHX_ output, "%s{utf8 locale}%s", PL_colors[1], PL_colors[0]);
put_charclass_bitmap_innards_invlist(output, only_utf8_locale);
/* This is the only list in this routine that can legally contain code
* points outside the bitmap range. The call just above to
* 'put_charclass_bitmap_innards_invlist' will simply suppress them, so
* output them here. There's about a half-dozen possible, and none in
* contiguous ranges longer than 2 */
if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) {
UV start, end;
SV* above_bitmap = NULL;
_invlist_subtract(only_utf8_locale, PL_InBitmap, &above_bitmap);
invlist_iterinit(above_bitmap);
while (invlist_iternext(above_bitmap, &start, &end)) {
UV i;
for (i = start; i <= end; i++) {
put_code_point(output, i);
}
}
invlist_iterfinish(above_bitmap);
SvREFCNT_dec_NN(above_bitmap);
}
}
if (invert && SvCUR(output) == 1) {
return NULL;
}
return output;
}
STATIC bool
S_put_charclass_bitmap_innards(pTHX_ SV *sv,
char *bitmap,
SV *nonbitmap_invlist,
SV *only_utf8_locale_invlist,
const regnode * const node,
const bool force_as_is_display)
{
/* Appends to 'sv' a displayable version of the innards of the bracketed
* character class defined by the other arguments:
* 'bitmap' points to the bitmap, or NULL if to ignore that.
* 'nonbitmap_invlist' is an inversion list of the code points that are in
* the bitmap range, but for some reason aren't in the bitmap; NULL if
* none. The reasons for this could be that they require some
* condition such as the target string being or not being in UTF-8
* (under /d), or because they came from a user-defined property that
* was not resolved at the time of the regex compilation (under /u)
* 'only_utf8_locale_invlist' is an inversion list of the code points that
* are valid only if the runtime locale is a UTF-8 one; NULL if none
* 'node' is the regex pattern ANYOF node. It is needed only when the
* above two parameters are not null, and is passed so that this
* routine can tease apart the various reasons for them.
* 'force_as_is_display' is TRUE if this routine should definitely NOT try
* to invert things to see if that leads to a cleaner display. If
* FALSE, this routine is free to use its judgment about doing this.
*
* It returns TRUE if there was actually something output. (It may be that
* the bitmap, etc is empty.)
*
* When called for outputting the bitmap of a non-ANYOF node, just pass the
* bitmap, with the succeeding parameters set to NULL, and the final one to
* FALSE.
*/
/* In general, it tries to display the 'cleanest' representation of the
* innards, choosing whether to display them inverted or not, regardless of
* whether the class itself is to be inverted. However, there are some
* cases where it can't try inverting, as what actually matches isn't known
* until runtime, and hence the inversion isn't either. */
dVAR;
bool inverting_allowed = ! force_as_is_display;
int i;
STRLEN orig_sv_cur = SvCUR(sv);
SV* invlist; /* Inversion list we accumulate of code points that
are unconditionally matched */
SV* only_utf8 = NULL; /* Under /d, list of matches iff the target is
UTF-8 */
SV* not_utf8 = NULL; /* /d, list of matches iff the target isn't UTF-8
*/
SV* posixes = NULL; /* Under /l, string of things like [:word:], \D */
SV* only_utf8_locale = NULL; /* Under /l, list of matches if the locale
is UTF-8 */
SV* as_is_display; /* The output string when we take the inputs
literally */
SV* inverted_display; /* The output string when we invert the inputs */
U8 flags = (node) ? ANYOF_FLAGS(node) : 0;
bool invert = cBOOL(flags & ANYOF_INVERT); /* Is the input to be inverted
to match? */
/* We are biased in favor of displaying things without them being inverted,
* as that is generally easier to understand */
const int bias = 5;
PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS;
/* Start off with whatever code points are passed in. (We clone, so we
* don't change the caller's list) */
if (nonbitmap_invlist) {
assert(invlist_highest(nonbitmap_invlist) < NUM_ANYOF_CODE_POINTS);
invlist = invlist_clone(nonbitmap_invlist, NULL);
}
else { /* Worst case size is every other code point is matched */
invlist = _new_invlist(NUM_ANYOF_CODE_POINTS / 2);
}
if (flags) {
if (OP(node) == ANYOFD) {
/* This flag indicates that the code points below 0x100 in the
* nonbitmap list are precisely the ones that match only when the
* target is UTF-8 (they should all be non-ASCII). */
if (flags & ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)
{
_invlist_intersection(invlist, PL_UpperLatin1, &only_utf8);
_invlist_subtract(invlist, only_utf8, &invlist);
}
/* And this flag for matching all non-ASCII 0xFF and below */
if (flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER)
{
not_utf8 = invlist_clone(PL_UpperLatin1, NULL);
}
}
else if (OP(node) == ANYOFL || OP(node) == ANYOFPOSIXL) {
/* If either of these flags are set, what matches isn't
* determinable except during execution, so don't know enough here
* to invert */
if (flags & (ANYOFL_FOLD|ANYOF_MATCHES_POSIXL)) {
inverting_allowed = FALSE;
}
/* What the posix classes match also varies at runtime, so these
* will be output symbolically. */
if (ANYOF_POSIXL_TEST_ANY_SET(node)) {
int i;
posixes = newSVpvs("");
for (i = 0; i < ANYOF_POSIXL_MAX; i++) {
if (ANYOF_POSIXL_TEST(node, i)) {
sv_catpv(posixes, anyofs[i]);
}
}
}
}
}
/* Accumulate the bit map into the unconditional match list */
if (bitmap) {
for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) {
if (BITMAP_TEST(bitmap, i)) {
int start = i++;
for (;
i < NUM_ANYOF_CODE_POINTS && BITMAP_TEST(bitmap, i);
i++)
{ /* empty */ }
invlist = _add_range_to_invlist(invlist, start, i-1);
}
}
}
/* Make sure that the conditional match lists don't have anything in them
* that match unconditionally; otherwise the output is quite confusing.
* This could happen if the code that populates these misses some
* duplication. */
if (only_utf8) {
_invlist_subtract(only_utf8, invlist, &only_utf8);
}
if (not_utf8) {
_invlist_subtract(not_utf8, invlist, ¬_utf8);
}
if (only_utf8_locale_invlist) {
/* Since this list is passed in, we have to make a copy before
* modifying it */
only_utf8_locale = invlist_clone(only_utf8_locale_invlist, NULL);
_invlist_subtract(only_utf8_locale, invlist, &only_utf8_locale);
/* And, it can get really weird for us to try outputting an inverted
* form of this list when it has things above the bitmap, so don't even
* try */
if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) {
inverting_allowed = FALSE;
}
}
/* Calculate what the output would be if we take the input as-is */
as_is_display = put_charclass_bitmap_innards_common(invlist,
posixes,
only_utf8,
not_utf8,
only_utf8_locale,
invert);
/* If have to take the output as-is, just do that */
if (! inverting_allowed) {
if (as_is_display) {
sv_catsv(sv, as_is_display);
SvREFCNT_dec_NN(as_is_display);
}
}
else { /* But otherwise, create the output again on the inverted input, and
use whichever version is shorter */
int inverted_bias, as_is_bias;
/* We will apply our bias to whichever of the the results doesn't have
* the '^' */
if (invert) {
invert = FALSE;
as_is_bias = bias;
inverted_bias = 0;
}
else {
invert = TRUE;
as_is_bias = 0;
inverted_bias = bias;
}
/* Now invert each of the lists that contribute to the output,
* excluding from the result things outside the possible range */
/* For the unconditional inversion list, we have to add in all the
* conditional code points, so that when inverted, they will be gone
* from it */
_invlist_union(only_utf8, invlist, &invlist);
_invlist_union(not_utf8, invlist, &invlist);
_invlist_union(only_utf8_locale, invlist, &invlist);
_invlist_invert(invlist);
_invlist_intersection(invlist, PL_InBitmap, &invlist);
if (only_utf8) {
_invlist_invert(only_utf8);
_invlist_intersection(only_utf8, PL_UpperLatin1, &only_utf8);
}
else if (not_utf8) {
/* If a code point matches iff the target string is not in UTF-8,
* then complementing the result has it not match iff not in UTF-8,
* which is the same thing as matching iff it is UTF-8. */
only_utf8 = not_utf8;
not_utf8 = NULL;
}
if (only_utf8_locale) {
_invlist_invert(only_utf8_locale);
_invlist_intersection(only_utf8_locale,
PL_InBitmap,
&only_utf8_locale);
}
inverted_display = put_charclass_bitmap_innards_common(
invlist,
posixes,
only_utf8,
not_utf8,
only_utf8_locale, invert);
/* Use the shortest representation, taking into account our bias
* against showing it inverted */
if ( inverted_display
&& ( ! as_is_display
|| ( SvCUR(inverted_display) + inverted_bias
< SvCUR(as_is_display) + as_is_bias)))
{
sv_catsv(sv, inverted_display);
}
else if (as_is_display) {
sv_catsv(sv, as_is_display);
}
SvREFCNT_dec(as_is_display);
SvREFCNT_dec(inverted_display);
}
SvREFCNT_dec_NN(invlist);
SvREFCNT_dec(only_utf8);
SvREFCNT_dec(not_utf8);
SvREFCNT_dec(posixes);
SvREFCNT_dec(only_utf8_locale);
return SvCUR(sv) > orig_sv_cur;
}
#define CLEAR_OPTSTART \
if (optstart) STMT_START { \
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ \
" (%" IVdf " nodes)\n", (IV)(node - optstart))); \
optstart=NULL; \
} STMT_END
#define DUMPUNTIL(b,e) \
CLEAR_OPTSTART; \
node=dumpuntil(r,start,(b),(e),last,sv,indent+1,depth+1);
STATIC const regnode *
S_dumpuntil(pTHX_ const regexp *r, const regnode *start, const regnode *node,
const regnode *last, const regnode *plast,
SV* sv, I32 indent, U32 depth)
{
U8 op = PSEUDO; /* Arbitrary non-END op. */
const regnode *next;
const regnode *optstart= NULL;
RXi_GET_DECL(r, ri);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMPUNTIL;
#ifdef DEBUG_DUMPUNTIL
Perl_re_printf( aTHX_ "--- %d : %d - %d - %d\n", indent, node-start,
last ? last-start : 0, plast ? plast-start : 0);
#endif
if (plast && plast < last)
last= plast;
while (PL_regkind[op] != END && (!last || node < last)) {
assert(node);
/* While that wasn't END last time... */
NODE_ALIGN(node);
op = OP(node);
if (op == CLOSE || op == SRCLOSE || op == WHILEM)
indent--;
next = regnext((regnode *)node);
/* Where, what. */
if (OP(node) == OPTIMIZED) {
if (!optstart && RE_DEBUG_FLAG(RE_DEBUG_COMPILE_OPTIMISE))
optstart = node;
else
goto after_print;
} else
CLEAR_OPTSTART;
regprop(r, sv, node, NULL, NULL);
Perl_re_printf( aTHX_ "%4" IVdf ":%*s%s", (IV)(node - start),
(int)(2*indent + 1), "", SvPVX_const(sv));
if (OP(node) != OPTIMIZED) {
if (next == NULL) /* Next ptr. */
Perl_re_printf( aTHX_ " (0)");
else if (PL_regkind[(U8)op] == BRANCH
&& PL_regkind[OP(next)] != BRANCH )
Perl_re_printf( aTHX_ " (FAIL)");
else
Perl_re_printf( aTHX_ " (%" IVdf ")", (IV)(next - start));
Perl_re_printf( aTHX_ "\n");
}
after_print:
if (PL_regkind[(U8)op] == BRANCHJ) {
assert(next);
{
const regnode *nnode = (OP(next) == LONGJMP
? regnext((regnode *)next)
: next);
if (last && nnode > last)
nnode = last;
DUMPUNTIL(NEXTOPER(NEXTOPER(node)), nnode);
}
}
else if (PL_regkind[(U8)op] == BRANCH) {
assert(next);
DUMPUNTIL(NEXTOPER(node), next);
}
else if ( PL_regkind[(U8)op] == TRIE ) {
const regnode *this_trie = node;
const char op = OP(node);
const U32 n = ARG(node);
const reg_ac_data * const ac = op>=AHOCORASICK ?
(reg_ac_data *)ri->data->data[n] :
NULL;
const reg_trie_data * const trie =
(reg_trie_data*)ri->data->data[op<AHOCORASICK ? n : ac->trie];
#ifdef DEBUGGING
AV *const trie_words
= MUTABLE_AV(ri->data->data[n + TRIE_WORDS_OFFSET]);
#endif
const regnode *nextbranch= NULL;
I32 word_idx;
SvPVCLEAR(sv);
for (word_idx= 0; word_idx < (I32)trie->wordcount; word_idx++) {
SV ** const elem_ptr = av_fetch(trie_words, word_idx, 0);
Perl_re_indentf( aTHX_ "%s ",
indent+3,
elem_ptr
? pv_pretty(sv, SvPV_nolen_const(*elem_ptr),
SvCUR(*elem_ptr), PL_dump_re_max_len,
PL_colors[0], PL_colors[1],
(SvUTF8(*elem_ptr)
? PERL_PV_ESCAPE_UNI
: 0)
| PERL_PV_PRETTY_ELLIPSES
| PERL_PV_PRETTY_LTGT
)
: "???"
);
if (trie->jump) {
U16 dist= trie->jump[word_idx+1];
Perl_re_printf( aTHX_ "(%" UVuf ")\n",
(UV)((dist ? this_trie + dist : next) - start));
if (dist) {
if (!nextbranch)
nextbranch= this_trie + trie->jump[0];
DUMPUNTIL(this_trie + dist, nextbranch);
}
if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH)
nextbranch= regnext((regnode *)nextbranch);
} else {
Perl_re_printf( aTHX_ "\n");
}
}
if (last && next > last)
node= last;
else
node= next;
}
else if ( op == CURLY ) { /* "next" might be very big: optimizer */
DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS,
NEXTOPER(node) + EXTRA_STEP_2ARGS + 1);
}
else if (PL_regkind[(U8)op] == CURLY && op != CURLYX) {
assert(next);
DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS, next);
}
else if ( op == PLUS || op == STAR) {
DUMPUNTIL(NEXTOPER(node), NEXTOPER(node) + 1);
}
else if (PL_regkind[(U8)op] == EXACT) {
/* Literal string, where present. */
node += NODE_SZ_STR(node) - 1;
node = NEXTOPER(node);
}
else {
node = NEXTOPER(node);
node += regarglen[(U8)op];
}
if (op == CURLYX || op == OPEN || op == SROPEN)
indent++;
}
CLEAR_OPTSTART;
#ifdef DEBUG_DUMPUNTIL
Perl_re_printf( aTHX_ "--- %d\n", (int)indent);
#endif
return node;
}
#endif /* DEBUGGING */
#ifndef PERL_IN_XSUB_RE
#include "uni_keywords.h"
void
Perl_init_uniprops(pTHX)
{
dVAR;
PL_user_def_props = newHV();
#ifdef USE_ITHREADS
HvSHAREKEYS_off(PL_user_def_props);
PL_user_def_props_aTHX = aTHX;
#endif
/* Set up the inversion list global variables */
PL_XPosix_ptrs[_CC_ASCII] = _new_invlist_C_array(uni_prop_ptrs[UNI_ASCII]);
PL_XPosix_ptrs[_CC_ALPHANUMERIC] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXALNUM]);
PL_XPosix_ptrs[_CC_ALPHA] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXALPHA]);
PL_XPosix_ptrs[_CC_BLANK] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXBLANK]);
PL_XPosix_ptrs[_CC_CASED] = _new_invlist_C_array(uni_prop_ptrs[UNI_CASED]);
PL_XPosix_ptrs[_CC_CNTRL] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXCNTRL]);
PL_XPosix_ptrs[_CC_DIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXDIGIT]);
PL_XPosix_ptrs[_CC_GRAPH] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXGRAPH]);
PL_XPosix_ptrs[_CC_LOWER] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXLOWER]);
PL_XPosix_ptrs[_CC_PRINT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXPRINT]);
PL_XPosix_ptrs[_CC_PUNCT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXPUNCT]);
PL_XPosix_ptrs[_CC_SPACE] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXSPACE]);
PL_XPosix_ptrs[_CC_UPPER] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXUPPER]);
PL_XPosix_ptrs[_CC_VERTSPACE] = _new_invlist_C_array(uni_prop_ptrs[UNI_VERTSPACE]);
PL_XPosix_ptrs[_CC_WORDCHAR] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXWORD]);
PL_XPosix_ptrs[_CC_XDIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXXDIGIT]);
PL_Posix_ptrs[_CC_ASCII] = _new_invlist_C_array(uni_prop_ptrs[UNI_ASCII]);
PL_Posix_ptrs[_CC_ALPHANUMERIC] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXALNUM]);
PL_Posix_ptrs[_CC_ALPHA] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXALPHA]);
PL_Posix_ptrs[_CC_BLANK] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXBLANK]);
PL_Posix_ptrs[_CC_CASED] = PL_Posix_ptrs[_CC_ALPHA];
PL_Posix_ptrs[_CC_CNTRL] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXCNTRL]);
PL_Posix_ptrs[_CC_DIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXDIGIT]);
PL_Posix_ptrs[_CC_GRAPH] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXGRAPH]);
PL_Posix_ptrs[_CC_LOWER] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXLOWER]);
PL_Posix_ptrs[_CC_PRINT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXPRINT]);
PL_Posix_ptrs[_CC_PUNCT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXPUNCT]);
PL_Posix_ptrs[_CC_SPACE] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXSPACE]);
PL_Posix_ptrs[_CC_UPPER] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXUPPER]);
PL_Posix_ptrs[_CC_VERTSPACE] = NULL;
PL_Posix_ptrs[_CC_WORDCHAR] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXWORD]);
PL_Posix_ptrs[_CC_XDIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXXDIGIT]);
PL_GCB_invlist = _new_invlist_C_array(_Perl_GCB_invlist);
PL_SB_invlist = _new_invlist_C_array(_Perl_SB_invlist);
PL_WB_invlist = _new_invlist_C_array(_Perl_WB_invlist);
PL_LB_invlist = _new_invlist_C_array(_Perl_LB_invlist);
PL_SCX_invlist = _new_invlist_C_array(_Perl_SCX_invlist);
PL_AboveLatin1 = _new_invlist_C_array(AboveLatin1_invlist);
PL_Latin1 = _new_invlist_C_array(Latin1_invlist);
PL_UpperLatin1 = _new_invlist_C_array(UpperLatin1_invlist);
PL_Assigned_invlist = _new_invlist_C_array(uni_prop_ptrs[UNI_ASSIGNED]);
PL_utf8_perl_idstart = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_IDSTART]);
PL_utf8_perl_idcont = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_IDCONT]);
PL_utf8_charname_begin = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_CHARNAME_BEGIN]);
PL_utf8_charname_continue = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_CHARNAME_CONTINUE]);
PL_in_some_fold = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_ANY_FOLDS]);
PL_HasMultiCharFold = _new_invlist_C_array(uni_prop_ptrs[
UNI__PERL_FOLDS_TO_MULTI_CHAR]);
PL_InMultiCharFold = _new_invlist_C_array(uni_prop_ptrs[
UNI__PERL_IS_IN_MULTI_CHAR_FOLD]);
PL_NonFinalFold = _new_invlist_C_array(uni_prop_ptrs[
UNI__PERL_NON_FINAL_FOLDS]);
PL_utf8_toupper = _new_invlist_C_array(Uppercase_Mapping_invlist);
PL_utf8_tolower = _new_invlist_C_array(Lowercase_Mapping_invlist);
PL_utf8_totitle = _new_invlist_C_array(Titlecase_Mapping_invlist);
PL_utf8_tofold = _new_invlist_C_array(Case_Folding_invlist);
PL_utf8_tosimplefold = _new_invlist_C_array(Simple_Case_Folding_invlist);
PL_utf8_foldclosures = _new_invlist_C_array(_Perl_IVCF_invlist);
PL_utf8_mark = _new_invlist_C_array(uni_prop_ptrs[UNI_M]);
PL_CCC_non0_non230 = _new_invlist_C_array(_Perl_CCC_non0_non230_invlist);
PL_Private_Use = _new_invlist_C_array(uni_prop_ptrs[UNI_CO]);
#ifdef UNI_XIDC
/* The below are used only by deprecated functions. They could be removed */
PL_utf8_xidcont = _new_invlist_C_array(uni_prop_ptrs[UNI_XIDC]);
PL_utf8_idcont = _new_invlist_C_array(uni_prop_ptrs[UNI_IDC]);
PL_utf8_xidstart = _new_invlist_C_array(uni_prop_ptrs[UNI_XIDS]);
#endif
}
#if 0
This code was mainly added for backcompat to give a warning for non-portable
code points in user-defined properties. But experiments showed that the
warning in earlier perls were only omitted on overflow, which should be an
error, so there really isnt a backcompat issue, and actually adding the
warning when none was present before might cause breakage, for little gain. So
khw left this code in, but not enabled. Tests were never added.
embed.fnc entry:
Ei |const char *|get_extended_utf8_msg|const UV cp
PERL_STATIC_INLINE const char *
S_get_extended_utf8_msg(pTHX_ const UV cp)
{
U8 dummy[UTF8_MAXBYTES + 1];
HV *msgs;
SV **msg;
uvchr_to_utf8_flags_msgs(dummy, cp, UNICODE_WARN_PERL_EXTENDED,
&msgs);
msg = hv_fetchs(msgs, "text", 0);
assert(msg);
(void) sv_2mortal((SV *) msgs);
return SvPVX(*msg);
}
#endif
SV *
Perl_handle_user_defined_property(pTHX_
/* Parses the contents of a user-defined property definition; returning the
* expanded definition if possible. If so, the return is an inversion
* list.
*
* If there are subroutines that are part of the expansion and which aren't
* known at the time of the call to this function, this returns what
* parse_uniprop_string() returned for the first one encountered.
*
* If an error was found, NULL is returned, and 'msg' gets a suitable
* message appended to it. (Appending allows the back trace of how we got
* to the faulty definition to be displayed through nested calls of
* user-defined subs.)
*
* The caller IS responsible for freeing any returned SV.
*
* The syntax of the contents is pretty much described in perlunicode.pod,
* but we also allow comments on each line */
const char * name, /* Name of property */
const STRLEN name_len, /* The name's length in bytes */
const bool is_utf8, /* ? Is 'name' encoded in UTF-8 */
const bool to_fold, /* ? Is this under /i */
const bool runtime, /* ? Are we in compile- or run-time */
const bool deferrable, /* Is it ok for this property's full definition
to be deferred until later? */
SV* contents, /* The property's definition */
bool *user_defined_ptr, /* This will be set TRUE as we wouldn't be
getting called unless this is thought to be
a user-defined property */
SV * msg, /* Any error or warning msg(s) are appended to
this */
const STRLEN level) /* Recursion level of this call */
{
STRLEN len;
const char * string = SvPV_const(contents, len);
const char * const e = string + len;
const bool is_contents_utf8 = cBOOL(SvUTF8(contents));
const STRLEN msgs_length_on_entry = SvCUR(msg);
const char * s0 = string; /* Points to first byte in the current line
being parsed in 'string' */
const char overflow_msg[] = "Code point too large in \"";
SV* running_definition = NULL;
PERL_ARGS_ASSERT_HANDLE_USER_DEFINED_PROPERTY;
*user_defined_ptr = TRUE;
/* Look at each line */
while (s0 < e) {
const char * s; /* Current byte */
char op = '+'; /* Default operation is 'union' */
IV min = 0; /* range begin code point */
IV max = -1; /* and range end */
SV* this_definition;
/* Skip comment lines */
if (*s0 == '#') {
s0 = strchr(s0, '\n');
if (s0 == NULL) {
break;
}
s0++;
continue;
}
/* For backcompat, allow an empty first line */
if (*s0 == '\n') {
s0++;
continue;
}
/* First character in the line may optionally be the operation */
if ( *s0 == '+'
|| *s0 == '!'
|| *s0 == '-'
|| *s0 == '&')
{
op = *s0++;
}
/* If the line is one or two hex digits separated by blank space, its
* a range; otherwise it is either another user-defined property or an
* error */
s = s0;
if (! isXDIGIT(*s)) {
goto check_if_property;
}
do { /* Each new hex digit will add 4 bits. */
if (min > ( (IV) MAX_LEGAL_CP >> 4)) {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpv(msg, overflow_msg);
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
goto return_failure;
}
/* Accumulate this digit into the value */
min = (min << 4) + READ_XDIGIT(s);
} while (isXDIGIT(*s));
while (isBLANK(*s)) { s++; }
/* We allow comments at the end of the line */
if (*s == '#') {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
s++;
}
else if (s < e && *s != '\n') {
if (! isXDIGIT(*s)) {
goto check_if_property;
}
/* Look for the high point of the range */
max = 0;
do {
if (max > ( (IV) MAX_LEGAL_CP >> 4)) {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpv(msg, overflow_msg);
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
goto return_failure;
}
max = (max << 4) + READ_XDIGIT(s);
} while (isXDIGIT(*s));
while (isBLANK(*s)) { s++; }
if (*s == '#') {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
}
else if (s < e && *s != '\n') {
goto check_if_property;
}
}
if (max == -1) { /* The line only had one entry */
max = min;
}
else if (max < min) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Illegal range in \"");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
goto return_failure;
}
#if 0 /* See explanation at definition above of get_extended_utf8_msg() */
if ( UNICODE_IS_PERL_EXTENDED(min)
|| UNICODE_IS_PERL_EXTENDED(max))
{
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
/* If both code points are non-portable, warn only on the lower
* one. */
sv_catpv(msg, get_extended_utf8_msg(
(UNICODE_IS_PERL_EXTENDED(min))
? min : max));
sv_catpvs(msg, " in \"");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
}
#endif
/* Here, this line contains a legal range */
this_definition = sv_2mortal(_new_invlist(2));
this_definition = _add_range_to_invlist(this_definition, min, max);
goto calculate;
check_if_property:
/* Here it isn't a legal range line. See if it is a legal property
* line. First find the end of the meat of the line */
s = strpbrk(s, "#\n");
if (s == NULL) {
s = e;
}
/* Ignore trailing blanks in keeping with the requirements of
* parse_uniprop_string() */
s--;
while (s > s0 && isBLANK_A(*s)) {
s--;
}
s++;
this_definition = parse_uniprop_string(s0, s - s0,
is_utf8, to_fold, runtime,
deferrable,
user_defined_ptr, msg,
(name_len == 0)
? level /* Don't increase level
if input is empty */
: level + 1
);
if (this_definition == NULL) {
goto return_failure; /* 'msg' should have had the reason
appended to it by the above call */
}
if (! is_invlist(this_definition)) { /* Unknown at this time */
return newSVsv(this_definition);
}
if (*s != '\n') {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
}
calculate:
switch (op) {
case '+':
_invlist_union(running_definition, this_definition,
&running_definition);
break;
case '-':
_invlist_subtract(running_definition, this_definition,
&running_definition);
break;
case '&':
_invlist_intersection(running_definition, this_definition,
&running_definition);
break;
case '!':
_invlist_union_complement_2nd(running_definition,
this_definition, &running_definition);
break;
default:
Perl_croak(aTHX_ "panic: %s: %d: Unexpected operation %d",
__FILE__, __LINE__, op);
break;
}
/* Position past the '\n' */
s0 = s + 1;
} /* End of loop through the lines of 'contents' */
/* Here, we processed all the lines in 'contents' without error. If we
* didn't add any warnings, simply return success */
if (msgs_length_on_entry == SvCUR(msg)) {
/* If the expansion was empty, the answer isn't nothing: its an empty
* inversion list */
if (running_definition == NULL) {
running_definition = _new_invlist(1);
}
return running_definition;
}
/* Otherwise, add some explanatory text, but we will return success */
goto return_msg;
return_failure:
running_definition = NULL;
return_msg:
if (name_len > 0) {
sv_catpvs(msg, " in expansion of ");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f, UTF8fARG(is_utf8, name_len, name));
}
return running_definition;
}
/* As explained below, certain operations need to take place in the first
* thread created. These macros switch contexts */
#ifdef USE_ITHREADS
# define DECLARATION_FOR_GLOBAL_CONTEXT \
PerlInterpreter * save_aTHX = aTHX;
# define SWITCH_TO_GLOBAL_CONTEXT \
PERL_SET_CONTEXT((aTHX = PL_user_def_props_aTHX))
# define RESTORE_CONTEXT PERL_SET_CONTEXT((aTHX = save_aTHX));
# define CUR_CONTEXT aTHX
# define ORIGINAL_CONTEXT save_aTHX
#else
# define DECLARATION_FOR_GLOBAL_CONTEXT
# define SWITCH_TO_GLOBAL_CONTEXT NOOP
# define RESTORE_CONTEXT NOOP
# define CUR_CONTEXT NULL
# define ORIGINAL_CONTEXT NULL
#endif
STATIC void
S_delete_recursion_entry(pTHX_ void *key)
{
/* Deletes the entry used to detect recursion when expanding user-defined
* properties. This is a function so it can be set up to be called even if
* the program unexpectedly quits */
dVAR;
SV ** current_entry;
const STRLEN key_len = strlen((const char *) key);
DECLARATION_FOR_GLOBAL_CONTEXT;
SWITCH_TO_GLOBAL_CONTEXT;
/* If the entry is one of these types, it is a permanent entry, and not the
* one used to detect recursions. This function should delete only the
* recursion entry */
current_entry = hv_fetch(PL_user_def_props, (const char *) key, key_len, 0);
if ( current_entry
&& ! is_invlist(*current_entry)
&& ! SvPOK(*current_entry))
{
(void) hv_delete(PL_user_def_props, (const char *) key, key_len,
G_DISCARD);
}
RESTORE_CONTEXT;
}
STATIC SV *
S_get_fq_name(pTHX_
const char * const name, /* The first non-blank in the \p{}, \P{} */
const Size_t name_len, /* Its length in bytes, not including any trailing space */
const bool is_utf8, /* ? Is 'name' encoded in UTF-8 */
const bool has_colon_colon
)
{
/* Returns a mortal SV containing the fully qualified version of the input
* name */
SV * fq_name;
fq_name = newSVpvs_flags("", SVs_TEMP);
/* Use the current package if it wasn't included in our input */
if (! has_colon_colon) {
const HV * pkg = (IN_PERL_COMPILETIME)
? PL_curstash
: CopSTASH(PL_curcop);
const char* pkgname = HvNAME(pkg);
Perl_sv_catpvf(aTHX_ fq_name, "%" UTF8f,
UTF8fARG(is_utf8, strlen(pkgname), pkgname));
sv_catpvs(fq_name, "::");
}
Perl_sv_catpvf(aTHX_ fq_name, "%" UTF8f,
UTF8fARG(is_utf8, name_len, name));
return fq_name;
}
SV *
Perl_parse_uniprop_string(pTHX_
/* Parse the interior of a \p{}, \P{}. Returns its definition if knowable
* now. If so, the return is an inversion list.
*
* If the property is user-defined, it is a subroutine, which in turn
* may call other subroutines. This function will call the whole nest of
* them to get the definition they return; if some aren't known at the time
* of the call to this function, the fully qualified name of the highest
* level sub is returned. It is an error to call this function at runtime
* without every sub defined.
*
* If an error was found, NULL is returned, and 'msg' gets a suitable
* message appended to it. (Appending allows the back trace of how we got
* to the faulty definition to be displayed through nested calls of
* user-defined subs.)
*
* The caller should NOT try to free any returned inversion list.
*
* Other parameters will be set on return as described below */
const char * const name, /* The first non-blank in the \p{}, \P{} */
const Size_t name_len, /* Its length in bytes, not including any
trailing space */
const bool is_utf8, /* ? Is 'name' encoded in UTF-8 */
const bool to_fold, /* ? Is this under /i */
const bool runtime, /* TRUE if this is being called at run time */
const bool deferrable, /* TRUE if it's ok for the definition to not be
known at this call */
bool *user_defined_ptr, /* Upon return from this function it will be
set to TRUE if any component is a
user-defined property */
SV * msg, /* Any error or warning msg(s) are appended to
this */
const STRLEN level) /* Recursion level of this call */
{
dVAR;
char* lookup_name; /* normalized name for lookup in our tables */
unsigned lookup_len; /* Its length */
bool stricter = FALSE; /* Some properties have stricter name
normalization rules, which we decide upon
based on parsing */
/* nv= or numeric_value=, or possibly one of the cjk numeric properties
* (though it requires extra effort to download them from Unicode and
* compile perl to know about them) */
bool is_nv_type = FALSE;
unsigned int i, j = 0;
int equals_pos = -1; /* Where the '=' is found, or negative if none */
int slash_pos = -1; /* Where the '/' is found, or negative if none */
int table_index = 0; /* The entry number for this property in the table
of all Unicode property names */
bool starts_with_In_or_Is = FALSE; /* ? Does the name start with 'In' or
'Is' */
Size_t lookup_offset = 0; /* Used to ignore the first few characters of
the normalized name in certain situations */
Size_t non_pkg_begin = 0; /* Offset of first byte in 'name' that isn't
part of a package name */
bool could_be_user_defined = TRUE; /* ? Could this be a user-defined
property rather than a Unicode
one. */
SV * prop_definition = NULL; /* The returned definition of 'name' or NULL
if an error. If it is an inversion list,
it is the definition. Otherwise it is a
string containing the fully qualified sub
name of 'name' */
SV * fq_name = NULL; /* For user-defined properties, the fully
qualified name */
bool invert_return = FALSE; /* ? Do we need to complement the result before
returning it */
PERL_ARGS_ASSERT_PARSE_UNIPROP_STRING;
/* The input will be normalized into 'lookup_name' */
Newx(lookup_name, name_len, char);
SAVEFREEPV(lookup_name);
/* Parse the input. */
for (i = 0; i < name_len; i++) {
char cur = name[i];
/* Most of the characters in the input will be of this ilk, being parts
* of a name */
if (isIDCONT_A(cur)) {
/* Case differences are ignored. Our lookup routine assumes
* everything is lowercase, so normalize to that */
if (isUPPER_A(cur)) {
lookup_name[j++] = toLOWER_A(cur);
continue;
}
if (cur == '_') { /* Don't include these in the normalized name */
continue;
}
lookup_name[j++] = cur;
/* The first character in a user-defined name must be of this type.
* */
if (i - non_pkg_begin == 0 && ! isIDFIRST_A(cur)) {
could_be_user_defined = FALSE;
}
continue;
}
/* Here, the character is not something typically in a name, But these
* two types of characters (and the '_' above) can be freely ignored in
* most situations. Later it may turn out we shouldn't have ignored
* them, and we have to reparse, but we don't have enough information
* yet to make that decision */
if (cur == '-' || isSPACE_A(cur)) {
could_be_user_defined = FALSE;
continue;
}
/* An equals sign or single colon mark the end of the first part of
* the property name */
if ( cur == '='
|| (cur == ':' && (i >= name_len - 1 || name[i+1] != ':')))
{
lookup_name[j++] = '='; /* Treat the colon as an '=' */
equals_pos = j; /* Note where it occurred in the input */
could_be_user_defined = FALSE;
break;
}
/* Otherwise, this character is part of the name. */
lookup_name[j++] = cur;
/* Here it isn't a single colon, so if it is a colon, it must be a
* double colon */
if (cur == ':') {
/* A double colon should be a package qualifier. We note its
* position and continue. Note that one could have
* pkg1::pkg2::...::foo
* so that the position at the end of the loop will be just after
* the final qualifier */
i++;
non_pkg_begin = i + 1;
lookup_name[j++] = ':';
}
else { /* Only word chars (and '::') can be in a user-defined name */
could_be_user_defined = FALSE;
}
} /* End of parsing through the lhs of the property name (or all of it if
no rhs) */
#define STRLENs(s) (sizeof("" s "") - 1)
/* If there is a single package name 'utf8::', it is ambiguous. It could
* be for a user-defined property, or it could be a Unicode property, as
* all of them are considered to be for that package. For the purposes of
* parsing the rest of the property, strip it off */
if (non_pkg_begin == STRLENs("utf8::") && memBEGINPs(name, name_len, "utf8::")) {
lookup_name += STRLENs("utf8::");
j -= STRLENs("utf8::");
equals_pos -= STRLENs("utf8::");
}
/* Here, we are either done with the whole property name, if it was simple;
* or are positioned just after the '=' if it is compound. */
if (equals_pos >= 0) {
assert(! stricter); /* We shouldn't have set this yet */
/* Space immediately after the '=' is ignored */
i++;
for (; i < name_len; i++) {
if (! isSPACE_A(name[i])) {
break;
}
}
/* Most punctuation after the equals indicates a subpattern, like
* \p{foo=/bar/} */
if ( isPUNCT_A(name[i])
&& name[i] != '-'
&& name[i] != '+'
&& name[i] != '_'
&& name[i] != '{')
{
/* Find the property. The table includes the equals sign, so we
* use 'j' as-is */
table_index = match_uniprop((U8 *) lookup_name, j);
if (table_index) {
const char * const * prop_values
= UNI_prop_value_ptrs[table_index];
SV * subpattern;
Size_t subpattern_len;
REGEXP * subpattern_re;
char open = name[i++];
char close;
const char * pos_in_brackets;
bool escaped = 0;
/* A backslash means the real delimitter is the next character.
* */
if (open == '\\') {
open = name[i++];
escaped = 1;
}
/* This data structure is constructed so that the matching
* closing bracket is 3 past its matching opening. The second
* set of closing is so that if the opening is something like
* ']', the closing will be that as well. Something similar is
* done in toke.c */
pos_in_brackets = strchr("([<)]>)]>", open);
close = (pos_in_brackets) ? pos_in_brackets[3] : open;
if ( i >= name_len
|| name[name_len-1] != close
|| (escaped && name[name_len-2] != '\\'))
{
sv_catpvs(msg, "Unicode property wildcard not terminated");
goto append_name_to_msg;
}
Perl_ck_warner_d(aTHX_
packWARN(WARN_EXPERIMENTAL__UNIPROP_WILDCARDS),
"The Unicode property wildcards feature is experimental");
/* Now create and compile the wildcard subpattern. Use /iaa
* because nothing outside of ASCII will match, and it the
* property values should all match /i. Note that when the
* pattern fails to compile, our added text to the user's
* pattern will be displayed to the user, which is not so
* desirable. */
subpattern_len = name_len - i - 1 - escaped;
subpattern = Perl_newSVpvf(aTHX_ "(?iaa:%.*s)",
(unsigned) subpattern_len,
name + i);
subpattern = sv_2mortal(subpattern);
subpattern_re = re_compile(subpattern, 0);
assert(subpattern_re); /* Should have died if didn't compile
successfully */
/* For each legal property value, see if the supplied pattern
* matches it. */
while (*prop_values) {
const char * const entry = *prop_values;
const Size_t len = strlen(entry);
SV* entry_sv = newSVpvn_flags(entry, len, SVs_TEMP);
if (pregexec(subpattern_re,
(char *) entry,
(char *) entry + len,
(char *) entry, 0,
entry_sv,
0))
{ /* Here, matched. Add to the returned list */
Size_t total_len = j + len;
SV * sub_invlist = NULL;
char * this_string;
/* We know this is a legal \p{property=value}. Call
* the function to return the list of code points that
* match it */
Newxz(this_string, total_len + 1, char);
Copy(lookup_name, this_string, j, char);
my_strlcat(this_string, entry, total_len + 1);
SAVEFREEPV(this_string);
sub_invlist = parse_uniprop_string(this_string,
total_len,
is_utf8,
to_fold,
runtime,
deferrable,
user_defined_ptr,
msg,
level + 1);
_invlist_union(prop_definition, sub_invlist,
&prop_definition);
}
prop_values++; /* Next iteration, look at next propvalue */
} /* End of looking through property values; (the data
structure is terminated by a NULL ptr) */
SvREFCNT_dec_NN(subpattern_re);
if (prop_definition) {
return prop_definition;
}
sv_catpvs(msg, "No Unicode property value wildcard matches:");
goto append_name_to_msg;
}
/* Here's how khw thinks we should proceed to handle the properties
* not yet done: Bidi Mirroring Glyph
Bidi Paired Bracket
Case Folding (both full and simple)
Decomposition Mapping
Equivalent Unified Ideograph
Name
Name Alias
Lowercase Mapping (both full and simple)
NFKC Case Fold
Titlecase Mapping (both full and simple)
Uppercase Mapping (both full and simple)
* Move the part that looks at the property values into a perl
* script, like utf8_heavy.pl is done. This makes things somewhat
* easier, but most importantly, it avoids always adding all these
* strings to the memory usage when the feature is little-used.
*
* The property values would all be concatenated into a single
* string per property with each value on a separate line, and the
* code point it's for on alternating lines. Then we match the
* user's input pattern m//mg, without having to worry about their
* uses of '^' and '$'. Only the values that aren't the default
* would be in the strings. Code points would be in UTF-8. The
* search pattern that we would construct would look like
* (?: \n (code-point_re) \n (?aam: user-re ) \n )
* And so $1 would contain the code point that matched the user-re.
* For properties where the default is the code point itself, such
* as any of the case changing mappings, the string would otherwise
* consist of all Unicode code points in UTF-8 strung together.
* This would be impractical. So instead, examine their compiled
* pattern, looking at the ssc. If none, reject the pattern as an
* error. Otherwise run the pattern against every code point in
* the ssc. The ssc is kind of like tr18's 3.9 Possible Match Sets
* And it might be good to create an API to return the ssc.
*
* For the name properties, a new function could be created in
* charnames which essentially does the same thing as above,
* sharing Name.pl with the other charname functions. Don't know
* about loose name matching, or algorithmically determined names.
* Decomposition.pl similarly.
*
* It might be that a new pattern modifier would have to be
* created, like /t for resTricTed, which changed the behavior of
* some constructs in their subpattern, like \A. */
} /* End of is a wildcard subppattern */
/* Certain properties whose values are numeric need special handling.
* They may optionally be prefixed by 'is'. Ignore that prefix for the
* purposes of checking if this is one of those properties */
if (memBEGINPs(lookup_name, j, "is")) {
lookup_offset = 2;
}
/* Then check if it is one of these specially-handled properties. The
* possibilities are hard-coded because easier this way, and the list
* is unlikely to change.
*
* All numeric value type properties are of this ilk, and are also
* special in a different way later on. So find those first. There
* are several numeric value type properties in the Unihan DB (which is
* unlikely to be compiled with perl, but we handle it here in case it
* does get compiled). They all end with 'numeric'. The interiors
* aren't checked for the precise property. This would stop working if
* a cjk property were to be created that ended with 'numeric' and
* wasn't a numeric type */
is_nv_type = memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "numericvalue")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "nv")
|| ( memENDPs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "numeric")
&& ( memBEGINPs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "cjk")
|| memBEGINPs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "k")));
if ( is_nv_type
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "canonicalcombiningclass")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "ccc")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "age")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "in")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "presentin"))
{
unsigned int k;
/* Since the stuff after the '=' is a number, we can't throw away
* '-' willy-nilly, as those could be a minus sign. Other stricter
* rules also apply. However, these properties all can have the
* rhs not be a number, in which case they contain at least one
* alphabetic. In those cases, the stricter rules don't apply.
* But the numeric type properties can have the alphas [Ee] to
* signify an exponent, and it is still a number with stricter
* rules. So look for an alpha that signifies not-strict */
stricter = TRUE;
for (k = i; k < name_len; k++) {
if ( isALPHA_A(name[k])
&& (! is_nv_type || ! isALPHA_FOLD_EQ(name[k], 'E')))
{
stricter = FALSE;
break;
}
}
}
if (stricter) {
/* A number may have a leading '+' or '-'. The latter is retained
* */
if (name[i] == '+') {
i++;
}
else if (name[i] == '-') {
lookup_name[j++] = '-';
i++;
}
/* Skip leading zeros including single underscores separating the
* zeros, or between the final leading zero and the first other
* digit */
for (; i < name_len - 1; i++) {
if ( name[i] != '0'
&& (name[i] != '_' || ! isDIGIT_A(name[i+1])))
{
break;
}
}
}
}
else { /* No '=' */
/* Only a few properties without an '=' should be parsed with stricter
* rules. The list is unlikely to change. */
if ( memBEGINPs(lookup_name, j, "perl")
&& memNEs(lookup_name + 4, j - 4, "space")
&& memNEs(lookup_name + 4, j - 4, "word"))
{
stricter = TRUE;
/* We set the inputs back to 0 and the code below will reparse,
* using strict */
i = j = 0;
}
}
/* Here, we have either finished the property, or are positioned to parse
* the remainder, and we know if stricter rules apply. Finish out, if not
* already done */
for (; i < name_len; i++) {
char cur = name[i];
/* In all instances, case differences are ignored, and we normalize to
* lowercase */
if (isUPPER_A(cur)) {
lookup_name[j++] = toLOWER(cur);
continue;
}
/* An underscore is skipped, but not under strict rules unless it
* separates two digits */
if (cur == '_') {
if ( stricter
&& ( i == 0 || (int) i == equals_pos || i == name_len- 1
|| ! isDIGIT_A(name[i-1]) || ! isDIGIT_A(name[i+1])))
{
lookup_name[j++] = '_';
}
continue;
}
/* Hyphens are skipped except under strict */
if (cur == '-' && ! stricter) {
continue;
}
/* XXX Bug in documentation. It says white space skipped adjacent to
* non-word char. Maybe we should, but shouldn't skip it next to a dot
* in a number */
if (isSPACE_A(cur) && ! stricter) {
continue;
}
lookup_name[j++] = cur;
/* Unless this is a non-trailing slash, we are done with it */
if (i >= name_len - 1 || cur != '/') {
continue;
}
slash_pos = j;
/* A slash in the 'numeric value' property indicates that what follows
* is a denominator. It can have a leading '+' and '0's that should be
* skipped. But we have never allowed a negative denominator, so treat
* a minus like every other character. (No need to rule out a second
* '/', as that won't match anything anyway */
if (is_nv_type) {
i++;
if (i < name_len && name[i] == '+') {
i++;
}
/* Skip leading zeros including underscores separating digits */
for (; i < name_len - 1; i++) {
if ( name[i] != '0'
&& (name[i] != '_' || ! isDIGIT_A(name[i+1])))
{
break;
}
}
/* Store the first real character in the denominator */
lookup_name[j++] = name[i];
}
}
/* Here are completely done parsing the input 'name', and 'lookup_name'
* contains a copy, normalized.
*
* This special case is grandfathered in: 'L_' and 'GC=L_' are accepted and
* different from without the underscores. */
if ( ( UNLIKELY(memEQs(lookup_name, j, "l"))
|| UNLIKELY(memEQs(lookup_name, j, "gc=l")))
&& UNLIKELY(name[name_len-1] == '_'))
{
lookup_name[j++] = '&';
}
/* If the original input began with 'In' or 'Is', it could be a subroutine
* call to a user-defined property instead of a Unicode property name. */
if ( non_pkg_begin + name_len > 2
&& name[non_pkg_begin+0] == 'I'
&& (name[non_pkg_begin+1] == 'n' || name[non_pkg_begin+1] == 's'))
{
starts_with_In_or_Is = TRUE;
}
else {
could_be_user_defined = FALSE;
}
if (could_be_user_defined) {
CV* user_sub;
/* If the user defined property returns the empty string, it could
* easily be because the pattern is being compiled before the data it
* actually needs to compile is available. This could be argued to be
* a bug in the perl code, but this is a change of behavior for Perl,
* so we handle it. This means that intentionally returning nothing
* will not be resolved until runtime */
bool empty_return = FALSE;
/* Here, the name could be for a user defined property, which are
* implemented as subs. */
user_sub = get_cvn_flags(name, name_len, 0);
if (user_sub) {
const char insecure[] = "Insecure user-defined property";
/* Here, there is a sub by the correct name. Normally we call it
* to get the property definition */
dSP;
SV * user_sub_sv = MUTABLE_SV(user_sub);
SV * error; /* Any error returned by calling 'user_sub' */
SV * key; /* The key into the hash of user defined sub names
*/
SV * placeholder;
SV ** saved_user_prop_ptr; /* Hash entry for this property */
/* How many times to retry when another thread is in the middle of
* expanding the same definition we want */
PERL_INT_FAST8_T retry_countdown = 10;
DECLARATION_FOR_GLOBAL_CONTEXT;
/* If we get here, we know this property is user-defined */
*user_defined_ptr = TRUE;
/* We refuse to call a potentially tainted subroutine; returning an
* error instead */
if (TAINT_get) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvn(msg, insecure, sizeof(insecure) - 1);
goto append_name_to_msg;
}
/* In principal, we only call each subroutine property definition
* once during the life of the program. This guarantees that the
* property definition never changes. The results of the single
* sub call are stored in a hash, which is used instead for future
* references to this property. The property definition is thus
* immutable. But, to allow the user to have a /i-dependent
* definition, we call the sub once for non-/i, and once for /i,
* should the need arise, passing the /i status as a parameter.
*
* We start by constructing the hash key name, consisting of the
* fully qualified subroutine name, preceded by the /i status, so
* that there is a key for /i and a different key for non-/i */
key = newSVpvn(((to_fold) ? "1" : "0"), 1);
fq_name = S_get_fq_name(aTHX_ name, name_len, is_utf8,
non_pkg_begin != 0);
sv_catsv(key, fq_name);
sv_2mortal(key);
/* We only call the sub once throughout the life of the program
* (with the /i, non-/i exception noted above). That means the
* hash must be global and accessible to all threads. It is
* created at program start-up, before any threads are created, so
* is accessible to all children. But this creates some
* complications.
*
* 1) The keys can't be shared, or else problems arise; sharing is
* turned off at hash creation time
* 2) All SVs in it are there for the remainder of the life of the
* program, and must be created in the same interpreter context
* as the hash, or else they will be freed from the wrong pool
* at global destruction time. This is handled by switching to
* the hash's context to create each SV going into it, and then
* immediately switching back
* 3) All accesses to the hash must be controlled by a mutex, to
* prevent two threads from getting an unstable state should
* they simultaneously be accessing it. The code below is
* crafted so that the mutex is locked whenever there is an
* access and unlocked only when the next stable state is
* achieved.
*
* The hash stores either the definition of the property if it was
* valid, or, if invalid, the error message that was raised. We
* use the type of SV to distinguish.
*
* There's also the need to guard against the definition expansion
* from infinitely recursing. This is handled by storing the aTHX
* of the expanding thread during the expansion. Again the SV type
* is used to distinguish this from the other two cases. If we
* come to here and the hash entry for this property is our aTHX,
* it means we have recursed, and the code assumes that we would
* infinitely recurse, so instead stops and raises an error.
* (Any recursion has always been treated as infinite recursion in
* this feature.)
*
* If instead, the entry is for a different aTHX, it means that
* that thread has gotten here first, and hasn't finished expanding
* the definition yet. We just have to wait until it is done. We
* sleep and retry a few times, returning an error if the other
* thread doesn't complete. */
re_fetch:
USER_PROP_MUTEX_LOCK;
/* If we have an entry for this key, the subroutine has already
* been called once with this /i status. */
saved_user_prop_ptr = hv_fetch(PL_user_def_props,
SvPVX(key), SvCUR(key), 0);
if (saved_user_prop_ptr) {
/* If the saved result is an inversion list, it is the valid
* definition of this property */
if (is_invlist(*saved_user_prop_ptr)) {
prop_definition = *saved_user_prop_ptr;
/* The SV in the hash won't be removed until global
* destruction, so it is stable and we can unlock */
USER_PROP_MUTEX_UNLOCK;
/* The caller shouldn't try to free this SV */
return prop_definition;
}
/* Otherwise, if it is a string, it is the error message
* that was returned when we first tried to evaluate this
* property. Fail, and append the message */
if (SvPOK(*saved_user_prop_ptr)) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catsv(msg, *saved_user_prop_ptr);
/* The SV in the hash won't be removed until global
* destruction, so it is stable and we can unlock */
USER_PROP_MUTEX_UNLOCK;
return NULL;
}
assert(SvIOK(*saved_user_prop_ptr));
/* Here, we have an unstable entry in the hash. Either another
* thread is in the middle of expanding the property's
* definition, or we are ourselves recursing. We use the aTHX
* in it to distinguish */
if (SvIV(*saved_user_prop_ptr) != PTR2IV(CUR_CONTEXT)) {
/* Here, it's another thread doing the expanding. We've
* looked as much as we are going to at the contents of the
* hash entry. It's safe to unlock. */
USER_PROP_MUTEX_UNLOCK;
/* Retry a few times */
if (retry_countdown-- > 0) {
PerlProc_sleep(1);
goto re_fetch;
}
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Timeout waiting for another thread to "
"define");
goto append_name_to_msg;
}
/* Here, we are recursing; don't dig any deeper */
USER_PROP_MUTEX_UNLOCK;
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg,
"Infinite recursion in user-defined property");
goto append_name_to_msg;
}
/* Here, this thread has exclusive control, and there is no entry
* for this property in the hash. So we have the go ahead to
* expand the definition ourselves. */
PUSHSTACKi(PERLSI_MAGIC);
ENTER;
/* Create a temporary placeholder in the hash to detect recursion
* */
SWITCH_TO_GLOBAL_CONTEXT;
placeholder= newSVuv(PTR2IV(ORIGINAL_CONTEXT));
(void) hv_store_ent(PL_user_def_props, key, placeholder, 0);
RESTORE_CONTEXT;
/* Now that we have a placeholder, we can let other threads
* continue */
USER_PROP_MUTEX_UNLOCK;
/* Make sure the placeholder always gets destroyed */
SAVEDESTRUCTOR_X(S_delete_recursion_entry, SvPVX(key));
PUSHMARK(SP);
SAVETMPS;
/* Call the user's function, with the /i status as a parameter.
* Note that we have gone to a lot of trouble to keep this call
* from being within the locked mutex region. */
XPUSHs(boolSV(to_fold));
PUTBACK;
/* The following block was taken from swash_init(). Presumably
* they apply to here as well, though we no longer use a swash --
* khw */
SAVEHINTS();
save_re_context();
/* We might get here via a subroutine signature which uses a utf8
* parameter name, at which point PL_subname will have been set
* but not yet used. */
save_item(PL_subname);
(void) call_sv(user_sub_sv, G_EVAL|G_SCALAR);
SPAGAIN;
error = ERRSV;
if (TAINT_get || SvTRUE(error)) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
if (SvTRUE(error)) {
sv_catpvs(msg, "Error \"");
sv_catsv(msg, error);
sv_catpvs(msg, "\"");
}
if (TAINT_get) {
if (SvTRUE(error)) sv_catpvs(msg, "; ");
sv_catpvn(msg, insecure, sizeof(insecure) - 1);
}
if (name_len > 0) {
sv_catpvs(msg, " in expansion of ");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f, UTF8fARG(is_utf8,
name_len,
name));
}
(void) POPs;
prop_definition = NULL;
}
else { /* G_SCALAR guarantees a single return value */
SV * contents = POPs;
/* The contents is supposed to be the expansion of the property
* definition. If the definition is deferrable, and we got an
* empty string back, set a flag to later defer it (after clean
* up below). */
if ( deferrable
&& (! SvPOK(contents) || SvCUR(contents) == 0))
{
empty_return = TRUE;
}
else { /* Otherwise, call a function to check for valid syntax,
and handle it */
prop_definition = handle_user_defined_property(
name, name_len,
is_utf8, to_fold, runtime,
deferrable,
contents, user_defined_ptr,
msg,
level);
}
}
/* Here, we have the results of the expansion. Delete the
* placeholder, and if the definition is now known, replace it with
* that definition. We need exclusive access to the hash, and we
* can't let anyone else in, between when we delete the placeholder
* and add the permanent entry */
USER_PROP_MUTEX_LOCK;
S_delete_recursion_entry(aTHX_ SvPVX(key));
if ( ! empty_return
&& (! prop_definition || is_invlist(prop_definition)))
{
/* If we got success we use the inversion list defining the
* property; otherwise use the error message */
SWITCH_TO_GLOBAL_CONTEXT;
(void) hv_store_ent(PL_user_def_props,
key,
((prop_definition)
? newSVsv(prop_definition)
: newSVsv(msg)),
0);
RESTORE_CONTEXT;
}
/* All done, and the hash now has a permanent entry for this
* property. Give up exclusive control */
USER_PROP_MUTEX_UNLOCK;
FREETMPS;
LEAVE;
POPSTACK;
if (empty_return) {
goto definition_deferred;
}
if (prop_definition) {
/* If the definition is for something not known at this time,
* we toss it, and go return the main property name, as that's
* the one the user will be aware of */
if (! is_invlist(prop_definition)) {
SvREFCNT_dec_NN(prop_definition);
goto definition_deferred;
}
sv_2mortal(prop_definition);
}
/* And return */
return prop_definition;
} /* End of calling the subroutine for the user-defined property */
} /* End of it could be a user-defined property */
/* Here it wasn't a user-defined property that is known at this time. See
* if it is a Unicode property */
lookup_len = j; /* This is a more mnemonic name than 'j' */
/* Get the index into our pointer table of the inversion list corresponding
* to the property */
table_index = match_uniprop((U8 *) lookup_name, lookup_len);
/* If it didn't find the property ... */
if (table_index == 0) {
/* Try again stripping off any initial 'In' or 'Is' */
if (starts_with_In_or_Is) {
lookup_name += 2;
lookup_len -= 2;
equals_pos -= 2;
slash_pos -= 2;
table_index = match_uniprop((U8 *) lookup_name, lookup_len);
}
if (table_index == 0) {
char * canonical;
/* Here, we didn't find it. If not a numeric type property, and
* can't be a user-defined one, it isn't a legal property */
if (! is_nv_type) {
if (! could_be_user_defined) {
goto failed;
}
/* Here, the property name is legal as a user-defined one. At
* compile time, it might just be that the subroutine for that
* property hasn't been encountered yet, but at runtime, it's
* an error to try to use an undefined one */
if (! deferrable) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Unknown user-defined property name");
goto append_name_to_msg;
}
goto definition_deferred;
} /* End of isn't a numeric type property */
/* The numeric type properties need more work to decide. What we
* do is make sure we have the number in canonical form and look
* that up. */
if (slash_pos < 0) { /* No slash */
/* When it isn't a rational, take the input, convert it to a
* NV, then create a canonical string representation of that
* NV. */
NV value;
SSize_t value_len = lookup_len - equals_pos;
/* Get the value */
if ( value_len <= 0
|| my_atof3(lookup_name + equals_pos, &value,
value_len)
!= lookup_name + lookup_len)
{
goto failed;
}
/* If the value is an integer, the canonical value is integral
* */
if (Perl_ceil(value) == value) {
canonical = Perl_form(aTHX_ "%.*s%.0" NVff,
equals_pos, lookup_name, value);
}
else { /* Otherwise, it is %e with a known precision */
char * exp_ptr;
canonical = Perl_form(aTHX_ "%.*s%.*" NVef,
equals_pos, lookup_name,
PL_E_FORMAT_PRECISION, value);
/* The exponent generated is expecting two digits, whereas
* %e on some systems will generate three. Remove leading
* zeros in excess of 2 from the exponent. We start
* looking for them after the '=' */
exp_ptr = strchr(canonical + equals_pos, 'e');
if (exp_ptr) {
char * cur_ptr = exp_ptr + 2; /* past the 'e[+-]' */
SSize_t excess_exponent_len = strlen(cur_ptr) - 2;
assert(*(cur_ptr - 1) == '-' || *(cur_ptr - 1) == '+');
if (excess_exponent_len > 0) {
SSize_t leading_zeros = strspn(cur_ptr, "0");
SSize_t excess_leading_zeros
= MIN(leading_zeros, excess_exponent_len);
if (excess_leading_zeros > 0) {
Move(cur_ptr + excess_leading_zeros,
cur_ptr,
strlen(cur_ptr) - excess_leading_zeros
+ 1, /* Copy the NUL as well */
char);
}
}
}
}
}
else { /* Has a slash. Create a rational in canonical form */
UV numerator, denominator, gcd, trial;
const char * end_ptr;
const char * sign = "";
/* We can't just find the numerator, denominator, and do the
* division, then use the method above, because that is
* inexact. And the input could be a rational that is within
* epsilon (given our precision) of a valid rational, and would
* then incorrectly compare valid.
*
* We're only interested in the part after the '=' */
const char * this_lookup_name = lookup_name + equals_pos;
lookup_len -= equals_pos;
slash_pos -= equals_pos;
/* Handle any leading minus */
if (this_lookup_name[0] == '-') {
sign = "-";
this_lookup_name++;
lookup_len--;
slash_pos--;
}
/* Convert the numerator to numeric */
end_ptr = this_lookup_name + slash_pos;
if (! grok_atoUV(this_lookup_name, &numerator, &end_ptr)) {
goto failed;
}
/* It better have included all characters before the slash */
if (*end_ptr != '/') {
goto failed;
}
/* Set to look at just the denominator */
this_lookup_name += slash_pos;
lookup_len -= slash_pos;
end_ptr = this_lookup_name + lookup_len;
/* Convert the denominator to numeric */
if (! grok_atoUV(this_lookup_name, &denominator, &end_ptr)) {
goto failed;
}
/* It better be the rest of the characters, and don't divide by
* 0 */
if ( end_ptr != this_lookup_name + lookup_len
|| denominator == 0)
{
goto failed;
}
/* Get the greatest common denominator using
http://en.wikipedia.org/wiki/Euclidean_algorithm */
gcd = numerator;
trial = denominator;
while (trial != 0) {
UV temp = trial;
trial = gcd % trial;
gcd = temp;
}
/* If already in lowest possible terms, we have already tried
* looking this up */
if (gcd == 1) {
goto failed;
}
/* Reduce the rational, which should put it in canonical form
* */
numerator /= gcd;
denominator /= gcd;
canonical = Perl_form(aTHX_ "%.*s%s%" UVuf "/%" UVuf,
equals_pos, lookup_name, sign, numerator, denominator);
}
/* Here, we have the number in canonical form. Try that */
table_index = match_uniprop((U8 *) canonical, strlen(canonical));
if (table_index == 0) {
goto failed;
}
} /* End of still didn't find the property in our table */
} /* End of didn't find the property in our table */
/* Here, we have a non-zero return, which is an index into a table of ptrs.
* A negative return signifies that the real index is the absolute value,
* but the result needs to be inverted */
if (table_index < 0) {
invert_return = TRUE;
table_index = -table_index;
}
/* Out-of band indices indicate a deprecated property. The proper index is
* modulo it with the table size. And dividing by the table size yields
* an offset into a table constructed by regen/mk_invlists.pl to contain
* the corresponding warning message */
if (table_index > MAX_UNI_KEYWORD_INDEX) {
Size_t warning_offset = table_index / MAX_UNI_KEYWORD_INDEX;
table_index %= MAX_UNI_KEYWORD_INDEX;
Perl_ck_warner_d(aTHX_ packWARN(WARN_DEPRECATED),
"Use of '%.*s' in \\p{} or \\P{} is deprecated because: %s",
(int) name_len, name, deprecated_property_msgs[warning_offset]);
}
/* In a few properties, a different property is used under /i. These are
* unlikely to change, so are hard-coded here. */
if (to_fold) {
if ( table_index == UNI_XPOSIXUPPER
|| table_index == UNI_XPOSIXLOWER
|| table_index == UNI_TITLE)
{
table_index = UNI_CASED;
}
else if ( table_index == UNI_UPPERCASELETTER
|| table_index == UNI_LOWERCASELETTER
# ifdef UNI_TITLECASELETTER /* Missing from early Unicodes */
|| table_index == UNI_TITLECASELETTER
# endif
) {
table_index = UNI_CASEDLETTER;
}
else if ( table_index == UNI_POSIXUPPER
|| table_index == UNI_POSIXLOWER)
{
table_index = UNI_POSIXALPHA;
}
}
/* Create and return the inversion list */
prop_definition =_new_invlist_C_array(uni_prop_ptrs[table_index]);
sv_2mortal(prop_definition);
/* See if there is a private use override to add to this definition */
{
COPHH * hinthash = (IN_PERL_COMPILETIME)
? CopHINTHASH_get(&PL_compiling)
: CopHINTHASH_get(PL_curcop);
SV * pu_overrides = cophh_fetch_pv(hinthash, "private_use", 0, 0);
if (UNLIKELY(pu_overrides && SvPOK(pu_overrides))) {
/* See if there is an element in the hints hash for this table */
SV * pu_lookup = Perl_newSVpvf(aTHX_ "%d=", table_index);
const char * pos = strstr(SvPVX(pu_overrides), SvPVX(pu_lookup));
if (pos) {
bool dummy;
SV * pu_definition;
SV * pu_invlist;
SV * expanded_prop_definition =
sv_2mortal(invlist_clone(prop_definition, NULL));
/* If so, it's definition is the string from here to the next
* \a character. And its format is the same as a user-defined
* property */
pos += SvCUR(pu_lookup);
pu_definition = newSVpvn(pos, strchr(pos, '\a') - pos);
pu_invlist = handle_user_defined_property(lookup_name,
lookup_len,
0, /* Not UTF-8 */
0, /* Not folded */
runtime,
deferrable,
pu_definition,
&dummy,
msg,
level);
if (TAINT_get) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Insecure private-use override");
goto append_name_to_msg;
}
/* For now, as a safety measure, make sure that it doesn't
* override non-private use code points */
_invlist_intersection(pu_invlist, PL_Private_Use, &pu_invlist);
/* Add it to the list to be returned */
_invlist_union(prop_definition, pu_invlist,
&expanded_prop_definition);
prop_definition = expanded_prop_definition;
Perl_ck_warner_d(aTHX_ packWARN(WARN_EXPERIMENTAL__PRIVATE_USE), "The private_use feature is experimental");
}
}
}
if (invert_return) {
_invlist_invert(prop_definition);
}
return prop_definition;
failed:
if (non_pkg_begin != 0) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Illegal user-defined property name");
}
else {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Can't find Unicode property definition");
}
/* FALLTHROUGH */
append_name_to_msg:
{
const char * prefix = (runtime && level == 0) ? " \\p{" : " \"";
const char * suffix = (runtime && level == 0) ? "}" : "\"";
sv_catpv(msg, prefix);
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f, UTF8fARG(is_utf8, name_len, name));
sv_catpv(msg, suffix);
}
return NULL;
definition_deferred:
/* Here it could yet to be defined, so defer evaluation of this
* until its needed at runtime. We need the fully qualified property name
* to avoid ambiguity, and a trailing newline */
if (! fq_name) {
fq_name = S_get_fq_name(aTHX_ name, name_len, is_utf8,
non_pkg_begin != 0 /* If has "::" */
);
}
sv_catpvs(fq_name, "\n");
*user_defined_ptr = TRUE;
return fq_name;
}
#endif
/*
* ex: set ts=8 sts=4 sw=4 et:
*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4012_3 |
crossvul-cpp_data_good_4523_1 | /* NetHack 3.6 topten.c $NHDT-Date: 1450451497 2015/12/18 15:11:37 $ $NHDT-Branch: NetHack-3.6.0 $:$NHDT-Revision: 1.44 $ */
/* Copyright (c) Stichting Mathematisch Centrum, Amsterdam, 1985. */
/*-Copyright (c) Robert Patrick Rankin, 2012. */
/* NetHack may be freely redistributed. See license for details. */
#include "hack.h"
#include "dlb.h"
#ifdef SHORT_FILENAMES
#include "patchlev.h"
#else
#include "patchlevel.h"
#endif
#ifdef VMS
/* We don't want to rewrite the whole file, because that entails
creating a new version which requires that the old one be deletable. */
#define UPDATE_RECORD_IN_PLACE
#endif
/*
* Updating in place can leave junk at the end of the file in some
* circumstances (if it shrinks and the O.S. doesn't have a straightforward
* way to truncate it). The trailing junk is harmless and the code
* which reads the scores will ignore it.
*/
#ifdef UPDATE_RECORD_IN_PLACE
static long final_fpos;
#endif
#define done_stopprint program_state.stopprint
#define newttentry() (struct toptenentry *) alloc(sizeof (struct toptenentry))
#define dealloc_ttentry(ttent) free((genericptr_t) (ttent))
#ifndef NAMSZ
/* Changing NAMSZ can break your existing record/logfile */
#define NAMSZ 10
#endif
#define DTHSZ 100
#define ROLESZ 3
struct toptenentry {
struct toptenentry *tt_next;
#ifdef UPDATE_RECORD_IN_PLACE
long fpos;
#endif
long points;
int deathdnum, deathlev;
int maxlvl, hp, maxhp, deaths;
int ver_major, ver_minor, patchlevel;
long deathdate, birthdate;
int uid;
char plrole[ROLESZ + 1];
char plrace[ROLESZ + 1];
char plgend[ROLESZ + 1];
char plalign[ROLESZ + 1];
char name[NAMSZ + 1];
char death[DTHSZ + 1];
} * tt_head;
/* size big enough to read in all the string fields at once; includes
room for separating space or trailing newline plus string terminator */
#define SCANBUFSZ (4 * (ROLESZ + 1) + (NAMSZ + 1) + (DTHSZ + 1) + 1)
STATIC_DCL void FDECL(topten_print, (const char *));
STATIC_DCL void FDECL(topten_print_bold, (const char *));
STATIC_DCL void NDECL(outheader);
STATIC_DCL void FDECL(outentry, (int, struct toptenentry *, BOOLEAN_P));
STATIC_DCL void FDECL(discardexcess, (FILE *));
STATIC_DCL void FDECL(readentry, (FILE *, struct toptenentry *));
STATIC_DCL void FDECL(writeentry, (FILE *, struct toptenentry *));
#ifdef XLOGFILE
STATIC_DCL void FDECL(writexlentry, (FILE *, struct toptenentry *, int));
STATIC_DCL long NDECL(encodexlogflags);
STATIC_DCL long NDECL(encodeconduct);
STATIC_DCL long NDECL(encodeachieve);
#endif
STATIC_DCL void FDECL(free_ttlist, (struct toptenentry *));
STATIC_DCL int FDECL(classmon, (char *, BOOLEAN_P));
STATIC_DCL int FDECL(score_wanted, (BOOLEAN_P, int, struct toptenentry *, int,
const char **, int));
#ifdef NO_SCAN_BRACK
STATIC_DCL void FDECL(nsb_mung_line, (char *));
STATIC_DCL void FDECL(nsb_unmung_line, (char *));
#endif
static winid toptenwin = WIN_ERR;
/* "killed by",&c ["an"] 'killer.name' */
void
formatkiller(buf, siz, how, incl_helpless)
char *buf;
unsigned siz;
int how;
boolean incl_helpless;
{
static NEARDATA const char *const killed_by_prefix[] = {
/* DIED, CHOKING, POISONING, STARVING, */
"killed by ", "choked on ", "poisoned by ", "died of ",
/* DROWNING, BURNING, DISSOLVED, CRUSHING, */
"drowned in ", "burned by ", "dissolved in ", "crushed to death by ",
/* STONING, TURNED_SLIME, GENOCIDED, */
"petrified by ", "turned to slime by ", "killed by ",
/* PANICKED, TRICKED, QUIT, ESCAPED, ASCENDED */
"", "", "", "", ""
};
unsigned l;
char c, *kname = killer.name;
buf[0] = '\0'; /* lint suppression */
switch (killer.format) {
default:
impossible("bad killer format? (%d)", killer.format);
/*FALLTHRU*/
case NO_KILLER_PREFIX:
break;
case KILLED_BY_AN:
kname = an(kname);
/*FALLTHRU*/
case KILLED_BY:
(void) strncat(buf, killed_by_prefix[how], siz - 1);
l = strlen(buf);
buf += l, siz -= l;
break;
}
/* Copy kname into buf[].
* Object names and named fruit have already been sanitized, but
* monsters can have "called 'arbitrary text'" attached to them,
* so make sure that that text can't confuse field splitting when
* record, logfile, or xlogfile is re-read at some later point.
*/
while (--siz > 0) {
c = *kname++;
if (!c)
break;
else if (c == ',')
c = ';';
/* 'xlogfile' doesn't really need protection for '=', but
fixrecord.awk for corrupted 3.6.0 'record' does (only
if using xlogfile rather than logfile to repair record) */
else if (c == '=')
c = '_';
/* tab is not possible due to use of mungspaces() when naming;
it would disrupt xlogfile parsing if it were present */
else if (c == '\t')
c = ' ';
*buf++ = c;
}
*buf = '\0';
if (incl_helpless && multi) {
/* X <= siz: 'sizeof "string"' includes 1 for '\0' terminator */
if (multi_reason && strlen(multi_reason) + sizeof ", while " <= siz)
Sprintf(buf, ", while %s", multi_reason);
/* either multi_reason wasn't specified or wouldn't fit */
else if (sizeof ", while helpless" <= siz)
Strcpy(buf, ", while helpless");
/* else extra death info won't fit, so leave it out */
}
}
STATIC_OVL void
topten_print(x)
const char *x;
{
if (toptenwin == WIN_ERR)
raw_print(x);
else
putstr(toptenwin, ATR_NONE, x);
}
STATIC_OVL void
topten_print_bold(x)
const char *x;
{
if (toptenwin == WIN_ERR)
raw_print_bold(x);
else
putstr(toptenwin, ATR_BOLD, x);
}
int
observable_depth(lev)
d_level *lev;
{
#if 0
/* if we ever randomize the order of the elemental planes, we
must use a constant external representation in the record file */
if (In_endgame(lev)) {
if (Is_astralevel(lev))
return -5;
else if (Is_waterlevel(lev))
return -4;
else if (Is_firelevel(lev))
return -3;
else if (Is_airlevel(lev))
return -2;
else if (Is_earthlevel(lev))
return -1;
else
return 0; /* ? */
} else
#endif
return depth(lev);
}
/* throw away characters until current record has been entirely consumed */
STATIC_OVL void
discardexcess(rfile)
FILE *rfile;
{
int c;
do {
c = fgetc(rfile);
} while (c != '\n' && c != EOF);
}
STATIC_OVL void
readentry(rfile, tt)
FILE *rfile;
struct toptenentry *tt;
{
char inbuf[SCANBUFSZ], s1[SCANBUFSZ], s2[SCANBUFSZ], s3[SCANBUFSZ],
s4[SCANBUFSZ], s5[SCANBUFSZ], s6[SCANBUFSZ];
#ifdef NO_SCAN_BRACK /* Version_ Pts DgnLevs_ Hp___ Died__Born id */
static const char fmt[] = "%d %d %d %ld %d %d %d %d %d %d %ld %ld %d%*c";
static const char fmt32[] = "%c%c %s %s%*c";
static const char fmt33[] = "%s %s %s %s %s %s%*c";
#else
static const char fmt[] = "%d.%d.%d %ld %d %d %d %d %d %d %ld %ld %d ";
static const char fmt32[] = "%c%c %[^,],%[^\n]%*c";
static const char fmt33[] = "%s %s %s %s %[^,],%[^\n]%*c";
#endif
#ifdef UPDATE_RECORD_IN_PLACE
/* note: input below must read the record's terminating newline */
final_fpos = tt->fpos = ftell(rfile);
#endif
#define TTFIELDS 13
if (fscanf(rfile, fmt, &tt->ver_major, &tt->ver_minor, &tt->patchlevel,
&tt->points, &tt->deathdnum, &tt->deathlev, &tt->maxlvl,
&tt->hp, &tt->maxhp, &tt->deaths, &tt->deathdate,
&tt->birthdate, &tt->uid) != TTFIELDS) {
#undef TTFIELDS
tt->points = 0;
discardexcess(rfile);
} else {
/* load remainder of record into a local buffer;
this imposes an implicit length limit of SCANBUFSZ
on every string field extracted from the buffer */
if (!fgets(inbuf, sizeof inbuf, rfile)) {
/* sscanf will fail and tt->points will be set to 0 */
*inbuf = '\0';
} else if (!index(inbuf, '\n')) {
Strcpy(&inbuf[sizeof inbuf - 2], "\n");
discardexcess(rfile);
}
/* Check for backwards compatibility */
if (tt->ver_major < 3 || (tt->ver_major == 3 && tt->ver_minor < 3)) {
int i;
if (sscanf(inbuf, fmt32, tt->plrole, tt->plgend, s1, s2) == 4) {
tt->plrole[1] = tt->plgend[1] = '\0'; /* read via %c */
copynchars(tt->name, s1, (int) (sizeof tt->name) - 1);
copynchars(tt->death, s2, (int) (sizeof tt->death) - 1);
} else
tt->points = 0;
tt->plrole[1] = '\0';
if ((i = str2role(tt->plrole)) >= 0)
Strcpy(tt->plrole, roles[i].filecode);
Strcpy(tt->plrace, "?");
Strcpy(tt->plgend, (tt->plgend[0] == 'M') ? "Mal" : "Fem");
Strcpy(tt->plalign, "?");
} else if (sscanf(inbuf, fmt33, s1, s2, s3, s4, s5, s6) == 6) {
copynchars(tt->plrole, s1, (int) (sizeof tt->plrole) - 1);
copynchars(tt->plrace, s2, (int) (sizeof tt->plrace) - 1);
copynchars(tt->plgend, s3, (int) (sizeof tt->plgend) - 1);
copynchars(tt->plalign, s4, (int) (sizeof tt->plalign) - 1);
copynchars(tt->name, s5, (int) (sizeof tt->name) - 1);
copynchars(tt->death, s6, (int) (sizeof tt->death) - 1);
} else
tt->points = 0;
#ifdef NO_SCAN_BRACK
if (tt->points > 0) {
nsb_unmung_line(tt->name);
nsb_unmung_line(tt->death);
}
#endif
}
/* check old score entries for Y2K problem and fix whenever found */
if (tt->points > 0) {
if (tt->birthdate < 19000000L)
tt->birthdate += 19000000L;
if (tt->deathdate < 19000000L)
tt->deathdate += 19000000L;
}
}
STATIC_OVL void
writeentry(rfile, tt)
FILE *rfile;
struct toptenentry *tt;
{
static const char fmt32[] = "%c%c "; /* role,gender */
static const char fmt33[] = "%s %s %s %s "; /* role,race,gndr,algn */
#ifndef NO_SCAN_BRACK
static const char fmt0[] = "%d.%d.%d %ld %d %d %d %d %d %d %ld %ld %d ";
static const char fmtX[] = "%s,%s\n";
#else /* NO_SCAN_BRACK */
static const char fmt0[] = "%d %d %d %ld %d %d %d %d %d %d %ld %ld %d ";
static const char fmtX[] = "%s %s\n";
nsb_mung_line(tt->name);
nsb_mung_line(tt->death);
#endif
(void) fprintf(rfile, fmt0, tt->ver_major, tt->ver_minor, tt->patchlevel,
tt->points, tt->deathdnum, tt->deathlev, tt->maxlvl,
tt->hp, tt->maxhp, tt->deaths, tt->deathdate,
tt->birthdate, tt->uid);
if (tt->ver_major < 3 || (tt->ver_major == 3 && tt->ver_minor < 3))
(void) fprintf(rfile, fmt32, tt->plrole[0], tt->plgend[0]);
else
(void) fprintf(rfile, fmt33, tt->plrole, tt->plrace, tt->plgend,
tt->plalign);
(void) fprintf(rfile, fmtX, onlyspace(tt->name) ? "_" : tt->name,
tt->death);
#ifdef NO_SCAN_BRACK
nsb_unmung_line(tt->name);
nsb_unmung_line(tt->death);
#endif
}
#ifdef XLOGFILE
/* as tab is never used in eg. plname or death, no need to mangle those. */
STATIC_OVL void
writexlentry(rfile, tt, how)
FILE *rfile;
struct toptenentry *tt;
int how;
{
#define Fprintf (void) fprintf
#define XLOG_SEP '\t' /* xlogfile field separator. */
char buf[BUFSZ], tmpbuf[DTHSZ + 1];
Sprintf(buf, "version=%d.%d.%d", tt->ver_major, tt->ver_minor,
tt->patchlevel);
Sprintf(eos(buf), "%cpoints=%ld%cdeathdnum=%d%cdeathlev=%d", XLOG_SEP,
tt->points, XLOG_SEP, tt->deathdnum, XLOG_SEP, tt->deathlev);
Sprintf(eos(buf), "%cmaxlvl=%d%chp=%d%cmaxhp=%d", XLOG_SEP, tt->maxlvl,
XLOG_SEP, tt->hp, XLOG_SEP, tt->maxhp);
Sprintf(eos(buf), "%cdeaths=%d%cdeathdate=%ld%cbirthdate=%ld%cuid=%d",
XLOG_SEP, tt->deaths, XLOG_SEP, tt->deathdate, XLOG_SEP,
tt->birthdate, XLOG_SEP, tt->uid);
Fprintf(rfile, "%s", buf);
Sprintf(buf, "%crole=%s%crace=%s%cgender=%s%calign=%s", XLOG_SEP,
tt->plrole, XLOG_SEP, tt->plrace, XLOG_SEP, tt->plgend, XLOG_SEP,
tt->plalign);
/* make a copy of death reason that doesn't include ", while helpless" */
formatkiller(tmpbuf, sizeof tmpbuf, how, FALSE);
Fprintf(rfile, "%s%cname=%s%cdeath=%s",
buf, /* (already includes separator) */
XLOG_SEP, plname, XLOG_SEP, tmpbuf);
if (multi)
Fprintf(rfile, "%cwhile=%s", XLOG_SEP,
multi_reason ? multi_reason : "helpless");
Fprintf(rfile, "%cconduct=0x%lx%cturns=%ld%cachieve=0x%lx", XLOG_SEP,
encodeconduct(), XLOG_SEP, moves, XLOG_SEP, encodeachieve());
Fprintf(rfile, "%crealtime=%ld%cstarttime=%ld%cendtime=%ld", XLOG_SEP,
(long) urealtime.realtime, XLOG_SEP,
(long) ubirthday, XLOG_SEP, (long) urealtime.finish_time);
Fprintf(rfile, "%cgender0=%s%calign0=%s", XLOG_SEP,
genders[flags.initgend].filecode, XLOG_SEP,
aligns[1 - u.ualignbase[A_ORIGINAL]].filecode);
Fprintf(rfile, "%cflags=0x%lx", XLOG_SEP, encodexlogflags());
Fprintf(rfile, "\n");
#undef XLOG_SEP
}
STATIC_OVL long
encodexlogflags()
{
long e = 0L;
if (wizard)
e |= 1L << 0;
if (discover)
e |= 1L << 1;
if (!u.uroleplay.numbones)
e |= 1L << 2;
return e;
}
STATIC_OVL long
encodeconduct()
{
long e = 0L;
if (!u.uconduct.food)
e |= 1L << 0;
if (!u.uconduct.unvegan)
e |= 1L << 1;
if (!u.uconduct.unvegetarian)
e |= 1L << 2;
if (!u.uconduct.gnostic)
e |= 1L << 3;
if (!u.uconduct.weaphit)
e |= 1L << 4;
if (!u.uconduct.killer)
e |= 1L << 5;
if (!u.uconduct.literate)
e |= 1L << 6;
if (!u.uconduct.polypiles)
e |= 1L << 7;
if (!u.uconduct.polyselfs)
e |= 1L << 8;
if (!u.uconduct.wishes)
e |= 1L << 9;
if (!u.uconduct.wisharti)
e |= 1L << 10;
if (!num_genocides())
e |= 1L << 11;
return e;
}
STATIC_OVL long
encodeachieve()
{
long r = 0L;
if (u.uachieve.bell)
r |= 1L << 0;
if (u.uachieve.enter_gehennom)
r |= 1L << 1;
if (u.uachieve.menorah)
r |= 1L << 2;
if (u.uachieve.book)
r |= 1L << 3;
if (u.uevent.invoked)
r |= 1L << 4;
if (u.uachieve.amulet)
r |= 1L << 5;
if (In_endgame(&u.uz))
r |= 1L << 6;
if (Is_astralevel(&u.uz))
r |= 1L << 7;
if (u.uachieve.ascended)
r |= 1L << 8;
if (u.uachieve.mines_luckstone)
r |= 1L << 9;
if (u.uachieve.finish_sokoban)
r |= 1L << 10;
if (u.uachieve.killed_medusa)
r |= 1L << 11;
if (u.uroleplay.blind)
r |= 1L << 12;
if (u.uroleplay.nudist)
r |= 1L << 13;
return r;
}
#endif /* XLOGFILE */
STATIC_OVL void
free_ttlist(tt)
struct toptenentry *tt;
{
struct toptenentry *ttnext;
while (tt->points > 0) {
ttnext = tt->tt_next;
dealloc_ttentry(tt);
tt = ttnext;
}
dealloc_ttentry(tt);
}
void
topten(how, when)
int how;
time_t when;
{
int uid = getuid();
int rank, rank0 = -1, rank1 = 0;
int occ_cnt = sysopt.persmax;
register struct toptenentry *t0, *tprev;
struct toptenentry *t1;
FILE *rfile;
register int flg = 0;
boolean t0_used;
#ifdef LOGFILE
FILE *lfile;
#endif /* LOGFILE */
#ifdef XLOGFILE
FILE *xlfile;
#endif /* XLOGFILE */
#ifdef _DCC
/* Under DICE 3.0, this crashes the system consistently, apparently due to
* corruption of *rfile somewhere. Until I figure this out, just cut out
* topten support entirely - at least then the game exits cleanly. --AC
*/
return;
#endif
/* If we are in the midst of a panic, cut out topten entirely.
* topten uses alloc() several times, which will lead to
* problems if the panic was the result of an alloc() failure.
*/
if (program_state.panicking)
return;
if (iflags.toptenwin) {
toptenwin = create_nhwindow(NHW_TEXT);
}
#if defined(UNIX) || defined(VMS) || defined(__EMX__)
#define HUP if (!program_state.done_hup)
#else
#define HUP
#endif
#ifdef TOS
restore_colors(); /* make sure the screen is black on white */
#endif
/* create a new 'topten' entry */
t0_used = FALSE;
t0 = newttentry();
t0->ver_major = VERSION_MAJOR;
t0->ver_minor = VERSION_MINOR;
t0->patchlevel = PATCHLEVEL;
t0->points = u.urexp;
t0->deathdnum = u.uz.dnum;
/* deepest_lev_reached() is in terms of depth(), and reporting the
* deepest level reached in the dungeon death occurred in doesn't
* seem right, so we have to report the death level in depth() terms
* as well (which also seems reasonable since that's all the player
* sees on the screen anyway)
*/
t0->deathlev = observable_depth(&u.uz);
t0->maxlvl = deepest_lev_reached(TRUE);
t0->hp = u.uhp;
t0->maxhp = u.uhpmax;
t0->deaths = u.umortality;
t0->uid = uid;
copynchars(t0->plrole, urole.filecode, ROLESZ);
copynchars(t0->plrace, urace.filecode, ROLESZ);
copynchars(t0->plgend, genders[flags.female].filecode, ROLESZ);
copynchars(t0->plalign, aligns[1 - u.ualign.type].filecode, ROLESZ);
copynchars(t0->name, plname, NAMSZ);
formatkiller(t0->death, sizeof t0->death, how, TRUE);
t0->birthdate = yyyymmdd(ubirthday);
t0->deathdate = yyyymmdd(when);
t0->tt_next = 0;
#ifdef UPDATE_RECORD_IN_PLACE
t0->fpos = -1L;
#endif
#ifdef LOGFILE /* used for debugging (who dies of what, where) */
if (lock_file(LOGFILE, SCOREPREFIX, 10)) {
if (!(lfile = fopen_datafile(LOGFILE, "a", SCOREPREFIX))) {
HUP raw_print("Cannot open log file!");
} else {
writeentry(lfile, t0);
(void) fclose(lfile);
}
unlock_file(LOGFILE);
}
#endif /* LOGFILE */
#ifdef XLOGFILE
if (lock_file(XLOGFILE, SCOREPREFIX, 10)) {
if (!(xlfile = fopen_datafile(XLOGFILE, "a", SCOREPREFIX))) {
HUP raw_print("Cannot open extended log file!");
} else {
writexlentry(xlfile, t0, how);
(void) fclose(xlfile);
}
unlock_file(XLOGFILE);
}
#endif /* XLOGFILE */
if (wizard || discover) {
if (how != PANICKED)
HUP {
char pbuf[BUFSZ];
topten_print("");
Sprintf(pbuf,
"Since you were in %s mode, the score list will not be checked.",
wizard ? "wizard" : "discover");
topten_print(pbuf);
}
goto showwin;
}
if (!lock_file(RECORD, SCOREPREFIX, 60))
goto destroywin;
#ifdef UPDATE_RECORD_IN_PLACE
rfile = fopen_datafile(RECORD, "r+", SCOREPREFIX);
#else
rfile = fopen_datafile(RECORD, "r", SCOREPREFIX);
#endif
if (!rfile) {
HUP raw_print("Cannot open record file!");
unlock_file(RECORD);
goto destroywin;
}
HUP topten_print("");
/* assure minimum number of points */
if (t0->points < sysopt.pointsmin)
t0->points = 0;
t1 = tt_head = newttentry();
tprev = 0;
/* rank0: -1 undefined, 0 not_on_list, n n_th on list */
for (rank = 1;;) {
readentry(rfile, t1);
if (t1->points < sysopt.pointsmin)
t1->points = 0;
if (rank0 < 0 && t1->points < t0->points) {
rank0 = rank++;
if (tprev == 0)
tt_head = t0;
else
tprev->tt_next = t0;
t0->tt_next = t1;
#ifdef UPDATE_RECORD_IN_PLACE
t0->fpos = t1->fpos; /* insert here */
#endif
t0_used = TRUE;
occ_cnt--;
flg++; /* ask for a rewrite */
} else
tprev = t1;
if (t1->points == 0)
break;
if ((sysopt.pers_is_uid ? t1->uid == t0->uid
: strncmp(t1->name, t0->name, NAMSZ) == 0)
&& !strncmp(t1->plrole, t0->plrole, ROLESZ) && --occ_cnt <= 0) {
if (rank0 < 0) {
rank0 = 0;
rank1 = rank;
HUP {
char pbuf[BUFSZ];
Sprintf(pbuf,
"You didn't beat your previous score of %ld points.",
t1->points);
topten_print(pbuf);
topten_print("");
}
}
if (occ_cnt < 0) {
flg++;
continue;
}
}
if (rank <= sysopt.entrymax) {
t1->tt_next = newttentry();
t1 = t1->tt_next;
rank++;
}
if (rank > sysopt.entrymax) {
t1->points = 0;
break;
}
}
if (flg) { /* rewrite record file */
#ifdef UPDATE_RECORD_IN_PLACE
(void) fseek(rfile, (t0->fpos >= 0 ? t0->fpos : final_fpos),
SEEK_SET);
#else
(void) fclose(rfile);
if (!(rfile = fopen_datafile(RECORD, "w", SCOREPREFIX))) {
HUP raw_print("Cannot write record file");
unlock_file(RECORD);
free_ttlist(tt_head);
goto destroywin;
}
#endif /* UPDATE_RECORD_IN_PLACE */
if (!done_stopprint)
if (rank0 > 0) {
if (rank0 <= 10) {
topten_print("You made the top ten list!");
} else {
char pbuf[BUFSZ];
Sprintf(pbuf,
"You reached the %d%s place on the top %d list.",
rank0, ordin(rank0), sysopt.entrymax);
topten_print(pbuf);
}
topten_print("");
}
}
if (rank0 == 0)
rank0 = rank1;
if (rank0 <= 0)
rank0 = rank;
if (!done_stopprint)
outheader();
t1 = tt_head;
for (rank = 1; t1->points != 0; rank++, t1 = t1->tt_next) {
if (flg
#ifdef UPDATE_RECORD_IN_PLACE
&& rank >= rank0
#endif
)
writeentry(rfile, t1);
if (done_stopprint)
continue;
if (rank > flags.end_top && (rank < rank0 - flags.end_around
|| rank > rank0 + flags.end_around)
&& (!flags.end_own
|| (sysopt.pers_is_uid
? t1->uid == t0->uid
: strncmp(t1->name, t0->name, NAMSZ) == 0)))
continue;
if (rank == rank0 - flags.end_around
&& rank0 > flags.end_top + flags.end_around + 1 && !flags.end_own)
topten_print("");
if (rank != rank0)
outentry(rank, t1, FALSE);
else if (!rank1)
outentry(rank, t1, TRUE);
else {
outentry(rank, t1, TRUE);
outentry(0, t0, TRUE);
}
}
if (rank0 >= rank)
if (!done_stopprint)
outentry(0, t0, TRUE);
#ifdef UPDATE_RECORD_IN_PLACE
if (flg) {
#ifdef TRUNCATE_FILE
/* if a reasonable way to truncate a file exists, use it */
truncate_file(rfile);
#else
/* use sentinel record rather than relying on truncation */
t1->points = 0L; /* terminates file when read back in */
t1->ver_major = t1->ver_minor = t1->patchlevel = 0;
t1->uid = t1->deathdnum = t1->deathlev = 0;
t1->maxlvl = t1->hp = t1->maxhp = t1->deaths = 0;
t1->plrole[0] = t1->plrace[0] = t1->plgend[0] = t1->plalign[0] = '-';
t1->plrole[1] = t1->plrace[1] = t1->plgend[1] = t1->plalign[1] = 0;
t1->birthdate = t1->deathdate = yyyymmdd((time_t) 0L);
Strcpy(t1->name, "@");
Strcpy(t1->death, "<eod>\n");
writeentry(rfile, t1);
(void) fflush(rfile);
#endif /* TRUNCATE_FILE */
}
#endif /* UPDATE_RECORD_IN_PLACE */
(void) fclose(rfile);
unlock_file(RECORD);
free_ttlist(tt_head);
showwin:
if (iflags.toptenwin && !done_stopprint)
display_nhwindow(toptenwin, 1);
destroywin:
if (!t0_used)
dealloc_ttentry(t0);
if (iflags.toptenwin) {
destroy_nhwindow(toptenwin);
toptenwin = WIN_ERR;
}
}
STATIC_OVL void
outheader()
{
char linebuf[BUFSZ];
register char *bp;
Strcpy(linebuf, " No Points Name");
bp = eos(linebuf);
while (bp < linebuf + COLNO - 9)
*bp++ = ' ';
Strcpy(bp, "Hp [max]");
topten_print(linebuf);
}
/* so>0: standout line; so=0: ordinary line */
STATIC_OVL void
outentry(rank, t1, so)
struct toptenentry *t1;
int rank;
boolean so;
{
boolean second_line = TRUE;
char linebuf[BUFSZ];
char *bp, hpbuf[24], linebuf3[BUFSZ];
int hppos, lngr;
linebuf[0] = '\0';
if (rank)
Sprintf(eos(linebuf), "%3d", rank);
else
Strcat(linebuf, " ");
Sprintf(eos(linebuf), " %10ld %.10s", t1->points ? t1->points : u.urexp,
t1->name);
Sprintf(eos(linebuf), "-%s", t1->plrole);
if (t1->plrace[0] != '?')
Sprintf(eos(linebuf), "-%s", t1->plrace);
/* Printing of gender and alignment is intentional. It has been
* part of the NetHack Geek Code, and illustrates a proper way to
* specify a character from the command line.
*/
Sprintf(eos(linebuf), "-%s", t1->plgend);
if (t1->plalign[0] != '?')
Sprintf(eos(linebuf), "-%s ", t1->plalign);
else
Strcat(linebuf, " ");
if (!strncmp("escaped", t1->death, 7)) {
Sprintf(eos(linebuf), "escaped the dungeon %s[max level %d]",
!strncmp(" (", t1->death + 7, 2) ? t1->death + 7 + 2 : "",
t1->maxlvl);
/* fixup for closing paren in "escaped... with...Amulet)[max..." */
if ((bp = index(linebuf, ')')) != 0)
*bp = (t1->deathdnum == astral_level.dnum) ? '\0' : ' ';
second_line = FALSE;
} else if (!strncmp("ascended", t1->death, 8)) {
Sprintf(eos(linebuf), "ascended to demigod%s-hood",
(t1->plgend[0] == 'F') ? "dess" : "");
second_line = FALSE;
} else {
if (!strncmp(t1->death, "quit", 4)) {
Strcat(linebuf, "quit");
second_line = FALSE;
} else if (!strncmp(t1->death, "died of st", 10)) {
Strcat(linebuf, "starved to death");
second_line = FALSE;
} else if (!strncmp(t1->death, "choked", 6)) {
Sprintf(eos(linebuf), "choked on h%s food",
(t1->plgend[0] == 'F') ? "er" : "is");
} else if (!strncmp(t1->death, "poisoned", 8)) {
Strcat(linebuf, "was poisoned");
} else if (!strncmp(t1->death, "crushed", 7)) {
Strcat(linebuf, "was crushed to death");
} else if (!strncmp(t1->death, "petrified by ", 13)) {
Strcat(linebuf, "turned to stone");
} else
Strcat(linebuf, "died");
if (t1->deathdnum == astral_level.dnum) {
const char *arg, *fmt = " on the Plane of %s";
switch (t1->deathlev) {
case -5:
fmt = " on the %s Plane";
arg = "Astral";
break;
case -4:
arg = "Water";
break;
case -3:
arg = "Fire";
break;
case -2:
arg = "Air";
break;
case -1:
arg = "Earth";
break;
default:
arg = "Void";
break;
}
Sprintf(eos(linebuf), fmt, arg);
} else {
Sprintf(eos(linebuf), " in %s", dungeons[t1->deathdnum].dname);
if (t1->deathdnum != knox_level.dnum)
Sprintf(eos(linebuf), " on level %d", t1->deathlev);
if (t1->deathlev != t1->maxlvl)
Sprintf(eos(linebuf), " [max %d]", t1->maxlvl);
}
/* kludge for "quit while already on Charon's boat" */
if (!strncmp(t1->death, "quit ", 5))
Strcat(linebuf, t1->death + 4);
}
Strcat(linebuf, ".");
/* Quit, starved, ascended, and escaped contain no second line */
if (second_line)
Sprintf(eos(linebuf), " %c%s.", highc(*(t1->death)), t1->death + 1);
lngr = (int) strlen(linebuf);
if (t1->hp <= 0)
hpbuf[0] = '-', hpbuf[1] = '\0';
else
Sprintf(hpbuf, "%d", t1->hp);
/* beginning of hp column after padding (not actually padded yet) */
hppos = COLNO - (sizeof(" Hp [max]") - 1); /* sizeof(str) includes \0 */
while (lngr >= hppos) {
for (bp = eos(linebuf); !(*bp == ' ' && (bp - linebuf < hppos)); bp--)
;
/* special case: word is too long, wrap in the middle */
if (linebuf + 15 >= bp)
bp = linebuf + hppos - 1;
/* special case: if about to wrap in the middle of maximum
dungeon depth reached, wrap in front of it instead */
if (bp > linebuf + 5 && !strncmp(bp - 5, " [max", 5))
bp -= 5;
if (*bp != ' ')
Strcpy(linebuf3, bp);
else
Strcpy(linebuf3, bp + 1);
*bp = 0;
if (so) {
while (bp < linebuf + (COLNO - 1))
*bp++ = ' ';
*bp = 0;
topten_print_bold(linebuf);
} else
topten_print(linebuf);
Sprintf(linebuf, "%15s %s", "", linebuf3);
lngr = strlen(linebuf);
}
/* beginning of hp column not including padding */
hppos = COLNO - 7 - (int) strlen(hpbuf);
bp = eos(linebuf);
if (bp <= linebuf + hppos) {
/* pad any necessary blanks to the hit point entry */
while (bp < linebuf + hppos)
*bp++ = ' ';
Strcpy(bp, hpbuf);
Sprintf(eos(bp), " %s[%d]",
(t1->maxhp < 10) ? " " : (t1->maxhp < 100) ? " " : "",
t1->maxhp);
}
if (so) {
bp = eos(linebuf);
if (so >= COLNO)
so = COLNO - 1;
while (bp < linebuf + so)
*bp++ = ' ';
*bp = 0;
topten_print_bold(linebuf);
} else
topten_print(linebuf);
}
STATIC_OVL int
score_wanted(current_ver, rank, t1, playerct, players, uid)
boolean current_ver;
int rank;
struct toptenentry *t1;
int playerct;
const char **players;
int uid;
{
int i;
if (current_ver
&& (t1->ver_major != VERSION_MAJOR || t1->ver_minor != VERSION_MINOR
|| t1->patchlevel != PATCHLEVEL))
return 0;
if (sysopt.pers_is_uid && !playerct && t1->uid == uid)
return 1;
for (i = 0; i < playerct; i++) {
if (players[i][0] == '-' && index("pr", players[i][1])
&& players[i][2] == 0 && i + 1 < playerct) {
const char *arg = players[i + 1];
if ((players[i][1] == 'p'
&& str2role(arg) == str2role(t1->plrole))
|| (players[i][1] == 'r'
&& str2race(arg) == str2race(t1->plrace)))
return 1;
i++;
} else if (strcmp(players[i], "all") == 0
|| strncmp(t1->name, players[i], NAMSZ) == 0
|| (players[i][0] == '-' && players[i][1] == t1->plrole[0]
&& players[i][2] == 0)
|| (digit(players[i][0]) && rank <= atoi(players[i])))
return 1;
}
return 0;
}
/*
* print selected parts of score list.
* argc >= 2, with argv[0] untrustworthy (directory names, et al.),
* and argv[1] starting with "-s".
* caveat: some shells might allow argv elements to be arbitrarily long.
*/
void
prscore(argc, argv)
int argc;
char **argv;
{
const char **players;
int playerct, rank;
boolean current_ver = TRUE, init_done = FALSE;
register struct toptenentry *t1;
FILE *rfile;
boolean match_found = FALSE;
register int i;
char pbuf[BUFSZ];
int uid = -1;
const char *player0;
if (argc < 2 || strncmp(argv[1], "-s", 2)) {
raw_printf("prscore: bad arguments (%d)", argc);
return;
}
rfile = fopen_datafile(RECORD, "r", SCOREPREFIX);
if (!rfile) {
raw_print("Cannot open record file!");
return;
}
#ifdef AMIGA
{
extern winid amii_rawprwin;
init_nhwindows(&argc, argv);
amii_rawprwin = create_nhwindow(NHW_TEXT);
}
#endif
/* If the score list isn't after a game, we never went through
* initialization. */
if (wiz1_level.dlevel == 0) {
dlb_init();
init_dungeons();
init_done = TRUE;
}
if (!argv[1][2]) { /* plain "-s" */
argc--;
argv++;
} else
argv[1] += 2;
if (argc > 1 && !strcmp(argv[1], "-v")) {
current_ver = FALSE;
argc--;
argv++;
}
if (argc <= 1) {
if (sysopt.pers_is_uid) {
uid = getuid();
playerct = 0;
players = (const char **) 0;
} else {
player0 = plname;
if (!*player0)
#ifdef AMIGA
player0 = "all"; /* single user system */
#else
player0 = "hackplayer";
#endif
playerct = 1;
players = &player0;
}
} else {
playerct = --argc;
players = (const char **) ++argv;
}
raw_print("");
t1 = tt_head = newttentry();
for (rank = 1;; rank++) {
readentry(rfile, t1);
if (t1->points == 0)
break;
if (!match_found
&& score_wanted(current_ver, rank, t1, playerct, players, uid))
match_found = TRUE;
t1->tt_next = newttentry();
t1 = t1->tt_next;
}
(void) fclose(rfile);
if (init_done) {
free_dungeons();
dlb_cleanup();
}
if (match_found) {
outheader();
t1 = tt_head;
for (rank = 1; t1->points != 0; rank++, t1 = t1->tt_next) {
if (score_wanted(current_ver, rank, t1, playerct, players, uid))
(void) outentry(rank, t1, FALSE);
}
} else {
Sprintf(pbuf, "Cannot find any %sentries for ",
current_ver ? "current " : "");
if (playerct < 1)
Strcat(pbuf, "you.");
else {
if (playerct > 1)
Strcat(pbuf, "any of ");
for (i = 0; i < playerct; i++) {
/* stop printing players if there are too many to fit */
if (strlen(pbuf) + strlen(players[i]) + 2 >= BUFSZ) {
if (strlen(pbuf) < BUFSZ - 4)
Strcat(pbuf, "...");
else
Strcpy(pbuf + strlen(pbuf) - 4, "...");
break;
}
Strcat(pbuf, players[i]);
if (i < playerct - 1) {
if (players[i][0] == '-' && index("pr", players[i][1])
&& players[i][2] == 0)
Strcat(pbuf, " ");
else
Strcat(pbuf, ":");
}
}
}
raw_print(pbuf);
raw_printf("Usage: %s -s [-v] <playertypes> [maxrank] [playernames]",
hname);
raw_printf("Player types are: [-p role] [-r race]");
}
free_ttlist(tt_head);
#ifdef AMIGA
{
extern winid amii_rawprwin;
display_nhwindow(amii_rawprwin, 1);
destroy_nhwindow(amii_rawprwin);
amii_rawprwin = WIN_ERR;
}
#endif
}
STATIC_OVL int
classmon(plch, fem)
char *plch;
boolean fem;
{
int i;
/* Look for this role in the role table */
for (i = 0; roles[i].name.m; i++)
if (!strncmp(plch, roles[i].filecode, ROLESZ)) {
if (fem && roles[i].femalenum != NON_PM)
return roles[i].femalenum;
else if (roles[i].malenum != NON_PM)
return roles[i].malenum;
else
return PM_HUMAN;
}
/* this might be from a 3.2.x score for former Elf class */
if (!strcmp(plch, "E"))
return PM_RANGER;
impossible("What weird role is this? (%s)", plch);
return PM_HUMAN_MUMMY;
}
/*
* Get a random player name and class from the high score list,
*/
struct toptenentry *
get_rnd_toptenentry()
{
int rank, i;
FILE *rfile;
register struct toptenentry *tt;
static struct toptenentry tt_buf;
rfile = fopen_datafile(RECORD, "r", SCOREPREFIX);
if (!rfile) {
impossible("Cannot open record file!");
return NULL;
}
tt = &tt_buf;
rank = rnd(sysopt.tt_oname_maxrank);
pickentry:
for (i = rank; i; i--) {
readentry(rfile, tt);
if (tt->points == 0)
break;
}
if (tt->points == 0) {
if (rank > 1) {
rank = 1;
rewind(rfile);
goto pickentry;
}
tt = NULL;
}
(void) fclose(rfile);
return tt;
}
/*
* Attach random player name and class from high score list
* to an object (for statues or morgue corpses).
*/
struct obj *
tt_oname(otmp)
struct obj *otmp;
{
struct toptenentry *tt;
if (!otmp)
return (struct obj *) 0;
tt = get_rnd_toptenentry();
if (!tt)
return (struct obj *) 0;
set_corpsenm(otmp, classmon(tt->plrole, (tt->plgend[0] == 'F')));
otmp = oname(otmp, tt->name);
return otmp;
}
#ifdef NO_SCAN_BRACK
/* Lattice scanf isn't up to reading the scorefile. What */
/* follows deals with that; I admit it's ugly. (KL) */
/* Now generally available (KL) */
STATIC_OVL void
nsb_mung_line(p)
char *p;
{
while ((p = index(p, ' ')) != 0)
*p = '|';
}
STATIC_OVL void
nsb_unmung_line(p)
char *p;
{
while ((p = index(p, '|')) != 0)
*p = ' ';
}
#endif /* NO_SCAN_BRACK */
/*topten.c*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4523_1 |
crossvul-cpp_data_good_4523_2 | /* NetHack 3.6 windows.c $NHDT-Date: 1575245096 2019/12/02 00:04:56 $ $NHDT-Branch: NetHack-3.6 $:$NHDT-Revision: 1.60 $ */
/* Copyright (c) D. Cohrs, 1993. */
/* NetHack may be freely redistributed. See license for details. */
#include "hack.h"
#ifdef TTY_GRAPHICS
#include "wintty.h"
#endif
#ifdef CURSES_GRAPHICS
extern struct window_procs curses_procs;
#endif
#ifdef X11_GRAPHICS
/* Cannot just blindly include winX.h without including all of X11 stuff
and must get the order of include files right. Don't bother. */
extern struct window_procs X11_procs;
extern void FDECL(win_X11_init, (int));
#endif
#ifdef QT_GRAPHICS
extern struct window_procs Qt_procs;
#endif
#ifdef GEM_GRAPHICS
#include "wingem.h"
#endif
#ifdef MAC
extern struct window_procs mac_procs;
#endif
#ifdef BEOS_GRAPHICS
extern struct window_procs beos_procs;
extern void FDECL(be_win_init, (int));
FAIL /* be_win_init doesn't exist? XXX*/
#endif
#ifdef AMIGA_INTUITION
extern struct window_procs amii_procs;
extern struct window_procs amiv_procs;
extern void FDECL(ami_wininit_data, (int));
#endif
#ifdef WIN32_GRAPHICS
extern struct window_procs win32_procs;
#endif
#ifdef GNOME_GRAPHICS
#include "winGnome.h"
extern struct window_procs Gnome_procs;
#endif
#ifdef MSWIN_GRAPHICS
extern struct window_procs mswin_procs;
#endif
#ifdef WINCHAIN
extern struct window_procs chainin_procs;
extern void FDECL(chainin_procs_init, (int));
extern void *FDECL(chainin_procs_chain, (int, int, void *, void *, void *));
extern struct chain_procs chainout_procs;
extern void FDECL(chainout_procs_init, (int));
extern void *FDECL(chainout_procs_chain, (int, int, void *, void *, void *));
extern struct chain_procs trace_procs;
extern void FDECL(trace_procs_init, (int));
extern void *FDECL(trace_procs_chain, (int, int, void *, void *, void *));
#endif
STATIC_DCL void FDECL(def_raw_print, (const char *s));
STATIC_DCL void NDECL(def_wait_synch);
#ifdef DUMPLOG
STATIC_DCL winid FDECL(dump_create_nhwindow, (int));
STATIC_DCL void FDECL(dump_clear_nhwindow, (winid));
STATIC_DCL void FDECL(dump_display_nhwindow, (winid, BOOLEAN_P));
STATIC_DCL void FDECL(dump_destroy_nhwindow, (winid));
STATIC_DCL void FDECL(dump_start_menu, (winid));
STATIC_DCL void FDECL(dump_add_menu, (winid, int, const ANY_P *, CHAR_P,
CHAR_P, int, const char *, BOOLEAN_P));
STATIC_DCL void FDECL(dump_end_menu, (winid, const char *));
STATIC_DCL int FDECL(dump_select_menu, (winid, int, MENU_ITEM_P **));
STATIC_DCL void FDECL(dump_putstr, (winid, int, const char *));
#endif /* DUMPLOG */
#ifdef HANGUPHANDLING
volatile
#endif
NEARDATA struct window_procs windowprocs;
#ifdef WINCHAIN
#define CHAINR(x) , x
#else
#define CHAINR(x)
#endif
static struct win_choices {
struct window_procs *procs;
void FDECL((*ini_routine), (int)); /* optional (can be 0) */
#ifdef WINCHAIN
void *FDECL((*chain_routine), (int, int, void *, void *, void *));
#endif
} winchoices[] = {
#ifdef TTY_GRAPHICS
{ &tty_procs, win_tty_init CHAINR(0) },
#endif
#ifdef CURSES_GRAPHICS
{ &curses_procs, 0 },
#endif
#ifdef X11_GRAPHICS
{ &X11_procs, win_X11_init CHAINR(0) },
#endif
#ifdef QT_GRAPHICS
{ &Qt_procs, 0 CHAINR(0) },
#endif
#ifdef GEM_GRAPHICS
{ &Gem_procs, win_Gem_init CHAINR(0) },
#endif
#ifdef MAC
{ &mac_procs, 0 CHAINR(0) },
#endif
#ifdef BEOS_GRAPHICS
{ &beos_procs, be_win_init CHAINR(0) },
#endif
#ifdef AMIGA_INTUITION
{ &amii_procs,
ami_wininit_data CHAINR(0) }, /* Old font version of the game */
{ &amiv_procs,
ami_wininit_data CHAINR(0) }, /* Tile version of the game */
#endif
#ifdef WIN32_GRAPHICS
{ &win32_procs, 0 CHAINR(0) },
#endif
#ifdef GNOME_GRAPHICS
{ &Gnome_procs, 0 CHAINR(0) },
#endif
#ifdef MSWIN_GRAPHICS
{ &mswin_procs, 0 CHAINR(0) },
#endif
#ifdef WINCHAIN
{ &chainin_procs, chainin_procs_init, chainin_procs_chain },
{ (struct window_procs *) &chainout_procs, chainout_procs_init,
chainout_procs_chain },
{ (struct window_procs *) &trace_procs, trace_procs_init,
trace_procs_chain },
#endif
{ 0, 0 CHAINR(0) } /* must be last */
};
#ifdef WINCHAIN
struct winlink {
struct winlink *nextlink;
struct win_choices *wincp;
void *linkdata;
};
/* NB: this chain does not contain the terminal real window system pointer */
static struct winlink *chain = 0;
static struct winlink *
wl_new()
{
struct winlink *wl = (struct winlink *) alloc(sizeof *wl);
wl->nextlink = 0;
wl->wincp = 0;
wl->linkdata = 0;
return wl;
}
static void
wl_addhead(struct winlink *wl)
{
wl->nextlink = chain;
chain = wl;
}
static void
wl_addtail(struct winlink *wl)
{
struct winlink *p = chain;
if (!chain) {
chain = wl;
return;
}
while (p->nextlink) {
p = p->nextlink;
}
p->nextlink = wl;
return;
}
#endif /* WINCHAIN */
static struct win_choices *last_winchoice = 0;
boolean
genl_can_suspend_no(VOID_ARGS)
{
return FALSE;
}
boolean
genl_can_suspend_yes(VOID_ARGS)
{
return TRUE;
}
STATIC_OVL
void
def_raw_print(s)
const char *s;
{
puts(s);
}
STATIC_OVL
void
def_wait_synch(VOID_ARGS)
{
/* Config file error handling routines
* call wait_sync() without checking to
* see if it actually has a value,
* leading to spectacular violations
* when you try to execute address zero.
* The existence of this allows early
* processing to have something to execute
* even though it essentially does nothing
*/
return;
}
#ifdef WINCHAIN
static struct win_choices *
win_choices_find(s)
const char *s;
{
register int i;
for (i = 0; winchoices[i].procs; i++) {
if (!strcmpi(s, winchoices[i].procs->name)) {
return &winchoices[i];
}
}
return (struct win_choices *) 0;
}
#endif
void
choose_windows(s)
const char *s;
{
int i;
char *tmps = 0;
for (i = 0; winchoices[i].procs; i++) {
if ('+' == winchoices[i].procs->name[0])
continue;
if ('-' == winchoices[i].procs->name[0])
continue;
if (!strcmpi(s, winchoices[i].procs->name)) {
windowprocs = *winchoices[i].procs;
if (last_winchoice && last_winchoice->ini_routine)
(*last_winchoice->ini_routine)(WININIT_UNDO);
if (winchoices[i].ini_routine)
(*winchoices[i].ini_routine)(WININIT);
last_winchoice = &winchoices[i];
return;
}
}
if (!windowprocs.win_raw_print)
windowprocs.win_raw_print = def_raw_print;
if (!windowprocs.win_wait_synch)
/* early config file error processing routines call this */
windowprocs.win_wait_synch = def_wait_synch;
if (!winchoices[0].procs) {
raw_printf("No window types supported?");
nh_terminate(EXIT_FAILURE);
}
/* 50: arbitrary, no real window_type names are anywhere near that long;
used to prevent potential raw_printf() overflow if user supplies a
very long string (on the order of 1200 chars) on the command line
(config file options can't get that big; they're truncated at 1023) */
#define WINDOW_TYPE_MAXLEN 50
if (strlen(s) >= WINDOW_TYPE_MAXLEN) {
tmps = (char *) alloc(WINDOW_TYPE_MAXLEN);
(void) strncpy(tmps, s, WINDOW_TYPE_MAXLEN - 1);
tmps[WINDOW_TYPE_MAXLEN - 1] = '\0';
s = tmps;
}
#undef WINDOW_TYPE_MAXLEN
if (!winchoices[1].procs) {
config_error_add(
"Window type %s not recognized. The only choice is: %s",
s, winchoices[0].procs->name);
} else {
char buf[BUFSZ];
boolean first = TRUE;
buf[0] = '\0';
for (i = 0; winchoices[i].procs; i++) {
if ('+' == winchoices[i].procs->name[0])
continue;
if ('-' == winchoices[i].procs->name[0])
continue;
Sprintf(eos(buf), "%s%s",
first ? "" : ", ", winchoices[i].procs->name);
first = FALSE;
}
config_error_add("Window type %s not recognized. Choices are: %s",
s, buf);
}
if (tmps)
free((genericptr_t) tmps) /*, tmps = 0*/ ;
if (windowprocs.win_raw_print == def_raw_print
|| WINDOWPORT("safe-startup"))
nh_terminate(EXIT_SUCCESS);
}
#ifdef WINCHAIN
void
addto_windowchain(s)
const char *s;
{
register int i;
for (i = 0; winchoices[i].procs; i++) {
if ('+' != winchoices[i].procs->name[0])
continue;
if (!strcmpi(s, winchoices[i].procs->name)) {
struct winlink *p = wl_new();
p->wincp = &winchoices[i];
wl_addtail(p);
/* NB: The ini_routine() will be called during commit. */
return;
}
}
windowprocs.win_raw_print = def_raw_print;
raw_printf("Window processor %s not recognized. Choices are:", s);
for (i = 0; winchoices[i].procs; i++) {
if ('+' != winchoices[i].procs->name[0])
continue;
raw_printf(" %s", winchoices[i].procs->name);
}
nh_terminate(EXIT_FAILURE);
}
void
commit_windowchain()
{
struct winlink *p;
int n;
int wincap, wincap2;
if (!chain)
return;
/* Save wincap* from the real window system - we'll restore it below. */
wincap = windowprocs.wincap;
wincap2 = windowprocs.wincap2;
/* add -chainin at head and -chainout at tail */
p = wl_new();
p->wincp = win_choices_find("-chainin");
if (!p->wincp) {
raw_printf("Can't locate processor '-chainin'");
exit(EXIT_FAILURE);
}
wl_addhead(p);
p = wl_new();
p->wincp = win_choices_find("-chainout");
if (!p->wincp) {
raw_printf("Can't locate processor '-chainout'");
exit(EXIT_FAILURE);
}
wl_addtail(p);
/* Now alloc() init() similar to Objective-C. */
for (n = 1, p = chain; p; n++, p = p->nextlink) {
p->linkdata = (*p->wincp->chain_routine)(WINCHAIN_ALLOC, n, 0, 0, 0);
}
for (n = 1, p = chain; p; n++, p = p->nextlink) {
if (p->nextlink) {
(void) (*p->wincp->chain_routine)(WINCHAIN_INIT, n, p->linkdata,
p->nextlink->wincp->procs,
p->nextlink->linkdata);
} else {
(void) (*p->wincp->chain_routine)(WINCHAIN_INIT, n, p->linkdata,
last_winchoice->procs, 0);
}
}
/* Restore the saved wincap* values. We do it here to give the
* ini_routine()s a chance to change or check them. */
chain->wincp->procs->wincap = wincap;
chain->wincp->procs->wincap2 = wincap2;
/* Call the init procs. Do not re-init the terminal real win. */
p = chain;
while (p->nextlink) {
if (p->wincp->ini_routine) {
(*p->wincp->ini_routine)(WININIT);
}
p = p->nextlink;
}
/* Install the chain into window procs very late so ini_routine()s
* can raw_print on error. */
windowprocs = *chain->wincp->procs;
p = chain;
while (p) {
struct winlink *np = p->nextlink;
free(p);
p = np; /* assignment, not proof */
}
}
#endif /* WINCHAIN */
/*
* tty_message_menu() provides a means to get feedback from the
* --More-- prompt; other interfaces generally don't need that.
*/
/*ARGSUSED*/
char
genl_message_menu(let, how, mesg)
char let UNUSED;
int how UNUSED;
const char *mesg;
{
pline("%s", mesg);
return 0;
}
/*ARGSUSED*/
void
genl_preference_update(pref)
const char *pref UNUSED;
{
/* window ports are expected to provide
their own preference update routine
for the preference capabilities that
they support.
Just return in this genl one. */
return;
}
char *
genl_getmsghistory(init)
boolean init UNUSED;
{
/* window ports can provide
their own getmsghistory() routine to
preserve message history between games.
The routine is called repeatedly from
the core save routine, and the window
port is expected to successively return
each message that it wants saved, starting
with the oldest message first, finishing
with the most recent.
Return null pointer when finished.
*/
return (char *) 0;
}
void
genl_putmsghistory(msg, is_restoring)
const char *msg;
boolean is_restoring;
{
/* window ports can provide
their own putmsghistory() routine to
load message history from a saved game.
The routine is called repeatedly from
the core restore routine, starting with
the oldest saved message first, and
finishing with the latest.
The window port routine is expected to
load the message recall buffers in such
a way that the ordering is preserved.
The window port routine should make no
assumptions about how many messages are
forthcoming, nor should it assume that
another message will follow this one,
so it should keep all pointers/indexes
intact at the end of each call.
*/
/* this doesn't provide for reloading the message window with the
previous session's messages upon restore, but it does put the quest
message summary lines there by treating them as ordinary messages */
if (!is_restoring)
pline("%s", msg);
return;
}
#ifdef HANGUPHANDLING
/*
* Dummy windowing scheme used to replace current one with no-ops
* in order to avoid all terminal I/O after hangup/disconnect.
*/
static int NDECL(hup_nhgetch);
static char FDECL(hup_yn_function, (const char *, const char *, CHAR_P));
static int FDECL(hup_nh_poskey, (int *, int *, int *));
static void FDECL(hup_getlin, (const char *, char *));
static void FDECL(hup_init_nhwindows, (int *, char **));
static void FDECL(hup_exit_nhwindows, (const char *));
static winid FDECL(hup_create_nhwindow, (int));
static int FDECL(hup_select_menu, (winid, int, MENU_ITEM_P **));
static void FDECL(hup_add_menu, (winid, int, const anything *, CHAR_P, CHAR_P,
int, const char *, BOOLEAN_P));
static void FDECL(hup_end_menu, (winid, const char *));
static void FDECL(hup_putstr, (winid, int, const char *));
static void FDECL(hup_print_glyph, (winid, XCHAR_P, XCHAR_P, int, int));
static void FDECL(hup_outrip, (winid, int, time_t));
static void FDECL(hup_curs, (winid, int, int));
static void FDECL(hup_display_nhwindow, (winid, BOOLEAN_P));
static void FDECL(hup_display_file, (const char *, BOOLEAN_P));
#ifdef CLIPPING
static void FDECL(hup_cliparound, (int, int));
#endif
#ifdef CHANGE_COLOR
static void FDECL(hup_change_color, (int, long, int));
#ifdef MAC
static short FDECL(hup_set_font_name, (winid, char *));
#endif
static char *NDECL(hup_get_color_string);
#endif /* CHANGE_COLOR */
static void FDECL(hup_status_update, (int, genericptr_t, int, int, int,
unsigned long *));
static int NDECL(hup_int_ndecl);
static void NDECL(hup_void_ndecl);
static void FDECL(hup_void_fdecl_int, (int));
static void FDECL(hup_void_fdecl_winid, (winid));
static void FDECL(hup_void_fdecl_constchar_p, (const char *));
static struct window_procs hup_procs = {
"hup", 0L, 0L,
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
hup_init_nhwindows,
hup_void_ndecl, /* player_selection */
hup_void_ndecl, /* askname */
hup_void_ndecl, /* get_nh_event */
hup_exit_nhwindows, hup_void_fdecl_constchar_p, /* suspend_nhwindows */
hup_void_ndecl, /* resume_nhwindows */
hup_create_nhwindow, hup_void_fdecl_winid, /* clear_nhwindow */
hup_display_nhwindow, hup_void_fdecl_winid, /* destroy_nhwindow */
hup_curs, hup_putstr, hup_putstr, /* putmixed */
hup_display_file, hup_void_fdecl_winid, /* start_menu */
hup_add_menu, hup_end_menu, hup_select_menu, genl_message_menu,
hup_void_ndecl, /* update_inventory */
hup_void_ndecl, /* mark_synch */
hup_void_ndecl, /* wait_synch */
#ifdef CLIPPING
hup_cliparound,
#endif
#ifdef POSITIONBAR
(void FDECL((*), (char *))) hup_void_fdecl_constchar_p,
/* update_positionbar */
#endif
hup_print_glyph,
hup_void_fdecl_constchar_p, /* raw_print */
hup_void_fdecl_constchar_p, /* raw_print_bold */
hup_nhgetch, hup_nh_poskey, hup_void_ndecl, /* nhbell */
hup_int_ndecl, /* doprev_message */
hup_yn_function, hup_getlin, hup_int_ndecl, /* get_ext_cmd */
hup_void_fdecl_int, /* number_pad */
hup_void_ndecl, /* delay_output */
#ifdef CHANGE_COLOR
hup_change_color,
#ifdef MAC
hup_void_fdecl_int, /* change_background */
hup_set_font_name,
#endif
hup_get_color_string,
#endif /* CHANGE_COLOR */
hup_void_ndecl, /* start_screen */
hup_void_ndecl, /* end_screen */
hup_outrip, genl_preference_update, genl_getmsghistory,
genl_putmsghistory,
hup_void_ndecl, /* status_init */
hup_void_ndecl, /* status_finish */
genl_status_enablefield, hup_status_update,
genl_can_suspend_no,
};
static void FDECL((*previnterface_exit_nhwindows), (const char *)) = 0;
/* hangup has occurred; switch to no-op user interface */
void
nhwindows_hangup()
{
char *FDECL((*previnterface_getmsghistory), (BOOLEAN_P)) = 0;
#ifdef ALTMETA
/* command processor shouldn't look for 2nd char after seeing ESC */
iflags.altmeta = FALSE;
#endif
/* don't call exit_nhwindows() directly here; if a hangup occurs
while interface code is executing, exit_nhwindows could knock
the interface's active data structures out from under itself */
if (iflags.window_inited
&& windowprocs.win_exit_nhwindows != hup_exit_nhwindows)
previnterface_exit_nhwindows = windowprocs.win_exit_nhwindows;
/* also, we have to leave the old interface's getmsghistory()
in place because it will be called while saving the game */
if (windowprocs.win_getmsghistory != hup_procs.win_getmsghistory)
previnterface_getmsghistory = windowprocs.win_getmsghistory;
windowprocs = hup_procs;
if (previnterface_getmsghistory)
windowprocs.win_getmsghistory = previnterface_getmsghistory;
}
static void
hup_exit_nhwindows(lastgasp)
const char *lastgasp;
{
/* core has called exit_nhwindows(); call the previous interface's
shutdown routine now; xxx_exit_nhwindows() needs to call other
xxx_ routines directly rather than through windowprocs pointers */
if (previnterface_exit_nhwindows) {
lastgasp = 0; /* don't want exit routine to attempt extra output */
(*previnterface_exit_nhwindows)(lastgasp);
previnterface_exit_nhwindows = 0;
}
iflags.window_inited = 0;
}
static int
hup_nhgetch(VOID_ARGS)
{
return '\033'; /* ESC */
}
/*ARGSUSED*/
static char
hup_yn_function(prompt, resp, deflt)
const char *prompt UNUSED, *resp UNUSED;
char deflt;
{
if (!deflt)
deflt = '\033';
return deflt;
}
/*ARGSUSED*/
static int
hup_nh_poskey(x, y, mod)
int *x UNUSED, *y UNUSED, *mod UNUSED;
{
return '\033';
}
/*ARGSUSED*/
static void
hup_getlin(prompt, outbuf)
const char *prompt UNUSED;
char *outbuf;
{
Strcpy(outbuf, "\033");
}
/*ARGSUSED*/
static void
hup_init_nhwindows(argc_p, argv)
int *argc_p UNUSED;
char **argv UNUSED;
{
iflags.window_inited = 1;
}
/*ARGUSED*/
static winid
hup_create_nhwindow(type)
int type UNUSED;
{
return WIN_ERR;
}
/*ARGSUSED*/
static int
hup_select_menu(window, how, menu_list)
winid window UNUSED;
int how UNUSED;
struct mi **menu_list UNUSED;
{
return -1;
}
/*ARGSUSED*/
static void
hup_add_menu(window, glyph, identifier, sel, grpsel, attr, txt, preselected)
winid window UNUSED;
int glyph UNUSED, attr UNUSED;
const anything *identifier UNUSED;
char sel UNUSED, grpsel UNUSED;
const char *txt UNUSED;
boolean preselected UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_end_menu(window, prompt)
winid window UNUSED;
const char *prompt UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_putstr(window, attr, text)
winid window UNUSED;
int attr UNUSED;
const char *text UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_print_glyph(window, x, y, glyph, bkglyph)
winid window UNUSED;
xchar x UNUSED, y UNUSED;
int glyph UNUSED;
int bkglyph UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_outrip(tmpwin, how, when)
winid tmpwin UNUSED;
int how UNUSED;
time_t when UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_curs(window, x, y)
winid window UNUSED;
int x UNUSED, y UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_display_nhwindow(window, blocking)
winid window UNUSED;
boolean blocking UNUSED;
{
return;
}
/*ARGSUSED*/
static void
hup_display_file(fname, complain)
const char *fname UNUSED;
boolean complain UNUSED;
{
return;
}
#ifdef CLIPPING
/*ARGSUSED*/
static void
hup_cliparound(x, y)
int x UNUSED, y UNUSED;
{
return;
}
#endif
#ifdef CHANGE_COLOR
/*ARGSUSED*/
static void
hup_change_color(color, rgb, reverse)
int color, reverse;
long rgb;
{
return;
}
#ifdef MAC
/*ARGSUSED*/
static short
hup_set_font_name(window, fontname)
winid window;
char *fontname;
{
return 0;
}
#endif /* MAC */
static char *
hup_get_color_string(VOID_ARGS)
{
return (char *) 0;
}
#endif /* CHANGE_COLOR */
/*ARGSUSED*/
static void
hup_status_update(idx, ptr, chg, pc, color, colormasks)
int idx UNUSED;
genericptr_t ptr UNUSED;
int chg UNUSED, pc UNUSED, color UNUSED;
unsigned long *colormasks UNUSED;
{
return;
}
/*
* Non-specific stubs.
*/
static int
hup_int_ndecl(VOID_ARGS)
{
return -1;
}
static void
hup_void_ndecl(VOID_ARGS)
{
return;
}
/*ARGUSED*/
static void
hup_void_fdecl_int(arg)
int arg UNUSED;
{
return;
}
/*ARGUSED*/
static void
hup_void_fdecl_winid(window)
winid window UNUSED;
{
return;
}
/*ARGUSED*/
static void
hup_void_fdecl_constchar_p(string)
const char *string UNUSED;
{
return;
}
#endif /* HANGUPHANDLING */
/****************************************************************************/
/* genl backward compat stuff */
/****************************************************************************/
const char *status_fieldnm[MAXBLSTATS];
const char *status_fieldfmt[MAXBLSTATS];
char *status_vals[MAXBLSTATS];
boolean status_activefields[MAXBLSTATS];
NEARDATA winid WIN_STATUS;
void
genl_status_init()
{
int i;
for (i = 0; i < MAXBLSTATS; ++i) {
status_vals[i] = (char *) alloc(MAXCO);
*status_vals[i] = '\0';
status_activefields[i] = FALSE;
status_fieldfmt[i] = (const char *) 0;
}
/* Use a window for the genl version; backward port compatibility */
WIN_STATUS = create_nhwindow(NHW_STATUS);
display_nhwindow(WIN_STATUS, FALSE);
}
void
genl_status_finish()
{
/* tear down routine */
int i;
/* free alloc'd memory here */
for (i = 0; i < MAXBLSTATS; ++i) {
if (status_vals[i])
free((genericptr_t) status_vals[i]), status_vals[i] = (char *) 0;
}
}
void
genl_status_enablefield(fieldidx, nm, fmt, enable)
int fieldidx;
const char *nm;
const char *fmt;
boolean enable;
{
status_fieldfmt[fieldidx] = fmt;
status_fieldnm[fieldidx] = nm;
status_activefields[fieldidx] = enable;
}
/* call once for each field, then call with BL_FLUSH to output the result */
void
genl_status_update(idx, ptr, chg, percent, color, colormasks)
int idx;
genericptr_t ptr;
int chg UNUSED, percent UNUSED, color UNUSED;
unsigned long *colormasks UNUSED;
{
char newbot1[MAXCO], newbot2[MAXCO];
long cond, *condptr = (long *) ptr;
register int i;
unsigned pass, lndelta;
enum statusfields idx1, idx2, *fieldlist;
char *nb, *text = (char *) ptr;
static enum statusfields fieldorder[][15] = {
/* line one */
{ BL_TITLE, BL_STR, BL_DX, BL_CO, BL_IN, BL_WI, BL_CH, BL_ALIGN,
BL_SCORE, BL_FLUSH, BL_FLUSH, BL_FLUSH, BL_FLUSH, BL_FLUSH,
BL_FLUSH },
/* line two, default order */
{ BL_LEVELDESC, BL_GOLD,
BL_HP, BL_HPMAX, BL_ENE, BL_ENEMAX, BL_AC,
BL_XP, BL_EXP, BL_HD,
BL_TIME,
BL_HUNGER, BL_CAP, BL_CONDITION,
BL_FLUSH },
/* move time to the end */
{ BL_LEVELDESC, BL_GOLD,
BL_HP, BL_HPMAX, BL_ENE, BL_ENEMAX, BL_AC,
BL_XP, BL_EXP, BL_HD,
BL_HUNGER, BL_CAP, BL_CONDITION,
BL_TIME, BL_FLUSH },
/* move experience and time to the end */
{ BL_LEVELDESC, BL_GOLD,
BL_HP, BL_HPMAX, BL_ENE, BL_ENEMAX, BL_AC,
BL_HUNGER, BL_CAP, BL_CONDITION,
BL_XP, BL_EXP, BL_HD, BL_TIME, BL_FLUSH },
/* move level description plus gold and experience and time to end */
{ BL_HP, BL_HPMAX, BL_ENE, BL_ENEMAX, BL_AC,
BL_HUNGER, BL_CAP, BL_CONDITION,
BL_LEVELDESC, BL_GOLD, BL_XP, BL_EXP, BL_HD, BL_TIME, BL_FLUSH },
};
/* in case interface is using genl_status_update() but has not
specified WC2_FLUSH_STATUS (status_update() for field values
is buffered so final BL_FLUSH is needed to produce output) */
windowprocs.wincap2 |= WC2_FLUSH_STATUS;
if (idx >= 0) {
if (!status_activefields[idx])
return;
switch (idx) {
case BL_CONDITION:
cond = condptr ? *condptr : 0L;
nb = status_vals[idx];
*nb = '\0';
if (cond & BL_MASK_STONE)
Strcpy(nb = eos(nb), " Stone");
if (cond & BL_MASK_SLIME)
Strcpy(nb = eos(nb), " Slime");
if (cond & BL_MASK_STRNGL)
Strcpy(nb = eos(nb), " Strngl");
if (cond & BL_MASK_FOODPOIS)
Strcpy(nb = eos(nb), " FoodPois");
if (cond & BL_MASK_TERMILL)
Strcpy(nb = eos(nb), " TermIll");
if (cond & BL_MASK_BLIND)
Strcpy(nb = eos(nb), " Blind");
if (cond & BL_MASK_DEAF)
Strcpy(nb = eos(nb), " Deaf");
if (cond & BL_MASK_STUN)
Strcpy(nb = eos(nb), " Stun");
if (cond & BL_MASK_CONF)
Strcpy(nb = eos(nb), " Conf");
if (cond & BL_MASK_HALLU)
Strcpy(nb = eos(nb), " Hallu");
if (cond & BL_MASK_LEV)
Strcpy(nb = eos(nb), " Lev");
if (cond & BL_MASK_FLY)
Strcpy(nb = eos(nb), " Fly");
if (cond & BL_MASK_RIDE)
Strcpy(nb = eos(nb), " Ride");
break;
default:
Sprintf(status_vals[idx],
status_fieldfmt[idx] ? status_fieldfmt[idx] : "%s",
text ? text : "");
break;
}
return; /* processed one field other than BL_FLUSH */
} /* (idx >= 0, thus not BL_FLUSH, BL_RESET, BL_CHARACTERISTICS) */
/* does BL_RESET require any specific code to ensure all fields ? */
if (!(idx == BL_FLUSH || idx == BL_RESET))
return;
/* We've received BL_FLUSH; time to output the gathered data */
nb = newbot1;
*nb = '\0';
/* BL_FLUSH is the only pseudo-index value we need to check for
in the loop below because it is the only entry used to pad the
end of the fieldorder array. We could stop on any
negative (illegal) index, but this should be fine */
for (i = 0; (idx1 = fieldorder[0][i]) != BL_FLUSH; ++i) {
if (status_activefields[idx1])
Strcpy(nb = eos(nb), status_vals[idx1]);
}
/* if '$' is encoded, buffer length of \GXXXXNNNN is 9 greater than
single char; we want to subtract that 9 when checking display length */
lndelta = (status_activefields[BL_GOLD]
&& strstr(status_vals[BL_GOLD], "\\G")) ? 9 : 0;
/* basic bot2 formats groups of second line fields into five buffers,
then decides how to order those buffers based on comparing lengths
of [sub]sets of them to the width of the map; we have more control
here but currently emulate that behavior */
for (pass = 1; pass <= 4; pass++) {
fieldlist = fieldorder[pass];
nb = newbot2;
*nb = '\0';
for (i = 0; (idx2 = fieldlist[i]) != BL_FLUSH; ++i) {
if (status_activefields[idx2]) {
const char *val = status_vals[idx2];
switch (idx2) {
case BL_HP: /* for pass 4, Hp comes first; mungspaces()
will strip the unwanted leading spaces */
case BL_XP: case BL_HD:
case BL_TIME:
Strcpy(nb = eos(nb), " ");
break;
case BL_LEVELDESC:
/* leveldesc has no leading space, so if we've moved
it past the first position, provide one */
if (i != 0)
Strcpy(nb = eos(nb), " ");
break;
/*
* We want " hunger encumbrance conditions"
* or " encumbrance conditions"
* or " hunger conditions"
* or " conditions"
* 'hunger' is either " " or " hunger_text";
* 'encumbrance' is either " " or " encumbrance_text";
* 'conditions' is either "" or " cond1 cond2...".
*/
case BL_HUNGER:
/* hunger==" " - keep it, end up with " ";
hunger!=" " - insert space and get " hunger" */
if (strcmp(val, " "))
Strcpy(nb = eos(nb), " ");
break;
case BL_CAP:
/* cap==" " - suppress it, retain " hunger" or " ";
cap!=" " - use it, get " hunger cap" or " cap" */
if (!strcmp(val, " "))
++val;
break;
default:
break;
}
Strcpy(nb = eos(nb), val); /* status_vals[idx2] */
} /* status_activefields[idx2] */
if (idx2 == BL_CONDITION && pass < 4
&& strlen(newbot2) - lndelta > COLNO)
break; /* switch to next order */
} /* i */
if (idx2 == BL_FLUSH) { /* made it past BL_CONDITION */
if (pass > 1)
mungspaces(newbot2);
break;
}
} /* pass */
curs(WIN_STATUS, 1, 0);
putstr(WIN_STATUS, 0, newbot1);
curs(WIN_STATUS, 1, 1);
putmixed(WIN_STATUS, 0, newbot2); /* putmixed() due to GOLD glyph */
}
STATIC_VAR struct window_procs dumplog_windowprocs_backup;
STATIC_VAR FILE *dumplog_file;
#ifdef DUMPLOG
STATIC_VAR time_t dumplog_now;
char *
dump_fmtstr(fmt, buf, fullsubs)
const char *fmt;
char *buf;
boolean fullsubs; /* True -> full substitution for file name, False ->
* partial substitution for '--showpaths' feedback
* where there's no game in progress when executed */
{
const char *fp = fmt;
char *bp = buf;
int slen, len = 0;
char tmpbuf[BUFSZ];
char verbuf[BUFSZ];
long uid;
time_t now;
now = dumplog_now;
uid = (long) getuid();
/*
* Note: %t and %T assume that time_t is a 'long int' number of
* seconds since some epoch value. That's quite iffy.... The
* unit of time might be different and the datum size might be
* some variant of 'long long int'. [Their main purpose is to
* construct a unique file name rather than record the date and
* time; violating the 'long seconds since base-date' assumption
* may or may not interfere with that usage.]
*/
while (fp && *fp && len < BUFSZ - 1) {
if (*fp == '%') {
fp++;
switch (*fp) {
default:
goto finish;
case '\0': /* fallthrough */
case '%': /* literal % */
Sprintf(tmpbuf, "%%");
break;
case 't': /* game start, timestamp */
if (fullsubs)
Sprintf(tmpbuf, "%lu", (unsigned long) ubirthday);
else
Strcpy(tmpbuf, "{game start cookie}");
break;
case 'T': /* current time, timestamp */
if (fullsubs)
Sprintf(tmpbuf, "%lu", (unsigned long) now);
else
Strcpy(tmpbuf, "{current time cookie}");
break;
case 'd': /* game start, YYYYMMDDhhmmss */
if (fullsubs)
Sprintf(tmpbuf, "%08ld%06ld",
yyyymmdd(ubirthday), hhmmss(ubirthday));
else
Strcpy(tmpbuf, "{game start date+time}");
break;
case 'D': /* current time, YYYYMMDDhhmmss */
if (fullsubs)
Sprintf(tmpbuf, "%08ld%06ld", yyyymmdd(now), hhmmss(now));
else
Strcpy(tmpbuf, "{current date+time}");
break;
case 'v': /* version, eg. "3.6.5-0" */
Sprintf(tmpbuf, "%s", version_string(verbuf));
break;
case 'u': /* UID */
Sprintf(tmpbuf, "%ld", uid);
break;
case 'n': /* player name */
if (fullsubs)
Sprintf(tmpbuf, "%s", *plname ? plname : "unknown");
else
Strcpy(tmpbuf, "{hero name}");
break;
case 'N': /* first character of player name */
if (fullsubs)
Sprintf(tmpbuf, "%c", *plname ? *plname : 'u');
else
Strcpy(tmpbuf, "{hero initial}");
break;
}
if (fullsubs) {
/* replace potentially troublesome characters (including
<space> even though it might be an acceptable file name
character); user shouldn't be able to get ' ' or '/'
or '\\' into plname[] but play things safe */
(void) strNsubst(tmpbuf, " ", "_", 0);
(void) strNsubst(tmpbuf, "/", "_", 0);
(void) strNsubst(tmpbuf, "\\", "_", 0);
/* note: replacements are only done on field substitutions,
not on the template (from sysconf or DUMPLOG_FILE) */
}
slen = (int) strlen(tmpbuf);
if (len + slen < BUFSZ - 1) {
len += slen;
Sprintf(bp, "%s", tmpbuf);
bp += slen;
if (*fp)
fp++;
} else
break;
} else {
*bp = *fp;
bp++;
fp++;
len++;
}
}
finish:
*bp = '\0';
return buf;
}
#endif /* DUMPLOG */
void
dump_open_log(now)
time_t now;
{
#ifdef DUMPLOG
char buf[BUFSZ];
char *fname;
dumplog_now = now;
#ifdef SYSCF
if (!sysopt.dumplogfile)
return;
fname = dump_fmtstr(sysopt.dumplogfile, buf, TRUE);
#else
fname = dump_fmtstr(DUMPLOG_FILE, buf, TRUE);
#endif
dumplog_file = fopen(fname, "w");
dumplog_windowprocs_backup = windowprocs;
#else /*!DUMPLOG*/
nhUse(now);
#endif /*?DUMPLOG*/
}
void
dump_close_log()
{
if (dumplog_file) {
(void) fclose(dumplog_file);
dumplog_file = (FILE *) 0;
}
}
void
dump_forward_putstr(win, attr, str, no_forward)
winid win;
int attr;
const char *str;
int no_forward;
{
if (dumplog_file)
fprintf(dumplog_file, "%s\n", str);
if (!no_forward)
putstr(win, attr, str);
}
/*ARGSUSED*/
STATIC_OVL void
dump_putstr(win, attr, str)
winid win UNUSED;
int attr UNUSED;
const char *str;
{
if (dumplog_file)
fprintf(dumplog_file, "%s\n", str);
}
STATIC_OVL winid
dump_create_nhwindow(dummy)
int dummy;
{
return dummy;
}
/*ARGUSED*/
STATIC_OVL void
dump_clear_nhwindow(win)
winid win UNUSED;
{
return;
}
/*ARGSUSED*/
STATIC_OVL void
dump_display_nhwindow(win, p)
winid win UNUSED;
boolean p UNUSED;
{
return;
}
/*ARGUSED*/
STATIC_OVL void
dump_destroy_nhwindow(win)
winid win UNUSED;
{
return;
}
/*ARGUSED*/
STATIC_OVL void
dump_start_menu(win)
winid win UNUSED;
{
return;
}
/*ARGSUSED*/
STATIC_OVL void
dump_add_menu(win, glyph, identifier, ch, gch, attr, str, preselected)
winid win UNUSED;
int glyph;
const anything *identifier UNUSED;
char ch;
char gch UNUSED;
int attr UNUSED;
const char *str;
boolean preselected UNUSED;
{
if (dumplog_file) {
if (glyph == NO_GLYPH)
fprintf(dumplog_file, " %s\n", str);
else
fprintf(dumplog_file, " %c - %s\n", ch, str);
}
}
/*ARGSUSED*/
STATIC_OVL void
dump_end_menu(win, str)
winid win UNUSED;
const char *str;
{
if (dumplog_file) {
if (str)
fprintf(dumplog_file, "%s\n", str);
else
fputs("\n", dumplog_file);
}
}
STATIC_OVL int
dump_select_menu(win, how, item)
winid win UNUSED;
int how UNUSED;
menu_item **item;
{
*item = (menu_item *) 0;
return 0;
}
void
dump_redirect(onoff_flag)
boolean onoff_flag;
{
if (dumplog_file) {
if (onoff_flag) {
windowprocs.win_create_nhwindow = dump_create_nhwindow;
windowprocs.win_clear_nhwindow = dump_clear_nhwindow;
windowprocs.win_display_nhwindow = dump_display_nhwindow;
windowprocs.win_destroy_nhwindow = dump_destroy_nhwindow;
windowprocs.win_start_menu = dump_start_menu;
windowprocs.win_add_menu = dump_add_menu;
windowprocs.win_end_menu = dump_end_menu;
windowprocs.win_select_menu = dump_select_menu;
windowprocs.win_putstr = dump_putstr;
} else {
windowprocs = dumplog_windowprocs_backup;
}
iflags.in_dumplog = onoff_flag;
} else {
iflags.in_dumplog = FALSE;
}
}
#ifdef TTY_GRAPHICS
#ifdef TEXTCOLOR
#ifdef TOS
extern const char *hilites[CLR_MAX];
#else
extern NEARDATA char *hilites[CLR_MAX];
#endif
#endif
#endif
int
has_color(color)
int color;
{
return (iflags.use_color && windowprocs.name
&& (windowprocs.wincap & WC_COLOR) && windowprocs.has_color[color]
#ifdef TTY_GRAPHICS
#if defined(TEXTCOLOR) && defined(TERMLIB) && !defined(NO_TERMS)
&& (hilites[color] != 0)
#endif
#endif
);
}
/*windows.c*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_4523_2 |
crossvul-cpp_data_bad_999_0 | /*
* Marvell Wireless LAN device driver: management IE handling- setting and
* deleting IE.
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
/* This function checks if current IE index is used by any on other interface.
* Return: -1: yes, current IE index is used by someone else.
* 0: no, current IE index is NOT used by other interface.
*/
static int
mwifiex_ie_index_used_by_other_intf(struct mwifiex_private *priv, u16 idx)
{
int i;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie *ie;
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i] != priv) {
ie = &adapter->priv[i]->mgmt_ie[idx];
if (ie->mgmt_subtype_mask && ie->ie_length)
return -1;
}
}
return 0;
}
/* Get unused IE index. This index will be used for setting new IE */
static int
mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
struct mwifiex_ie *ie, u16 *index)
{
u16 mask, len, i;
for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
len = le16_to_cpu(ie->ie_length);
if (mask == MWIFIEX_AUTO_IDX_MASK)
continue;
if (mask == subtype_mask) {
if (len > IEEE_MAX_IE_SIZE)
continue;
*index = i;
return 0;
}
if (!priv->mgmt_ie[i].ie_length) {
if (mwifiex_ie_index_used_by_other_intf(priv, i))
continue;
*index = i;
return 0;
}
}
return -1;
}
/* This function prepares IE data buffer for command to be sent to FW */
static int
mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
struct mwifiex_ie_list *ie_list)
{
u16 travel_len, index, mask;
s16 input_len, tlv_len;
struct mwifiex_ie *ie;
u8 *tmp;
input_len = le16_to_cpu(ie_list->len);
travel_len = sizeof(struct mwifiex_ie_types_header);
ie_list->len = 0;
while (input_len >= sizeof(struct mwifiex_ie_types_header)) {
ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
tlv_len = le16_to_cpu(ie->ie_length);
travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE;
if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE)
return -1;
index = le16_to_cpu(ie->ie_index);
mask = le16_to_cpu(ie->mgmt_subtype_mask);
if (index == MWIFIEX_AUTO_IDX_MASK) {
/* automatic addition */
if (mwifiex_ie_get_autoidx(priv, mask, ie, &index))
return -1;
if (index == MWIFIEX_AUTO_IDX_MASK)
return -1;
tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
priv->mgmt_ie[index].ie_length = ie->ie_length;
priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
priv->mgmt_ie[index].mgmt_subtype_mask =
cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(index);
} else {
if (mask != MWIFIEX_DELETE_MASK)
return -1;
/*
* Check if this index is being used on any
* other interface.
*/
if (mwifiex_ie_index_used_by_other_intf(priv, index))
return -1;
ie->ie_length = 0;
memcpy(&priv->mgmt_ie[index], ie,
sizeof(struct mwifiex_ie));
}
le16_unaligned_add_cpu(&ie_list->len,
le16_to_cpu(
priv->mgmt_ie[index].ie_length) +
MWIFIEX_IE_HDR_SIZE);
input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_CUSTOM_IE_I, ie_list, true);
return 0;
}
/* Copy individual custom IEs for beacon, probe response and assoc response
* and prepare single structure for IE setting.
* This function also updates allocated IE indices from driver.
*/
static int
mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
struct mwifiex_ie *beacon_ie, u16 *beacon_idx,
struct mwifiex_ie *pr_ie, u16 *probe_idx,
struct mwifiex_ie *ar_ie, u16 *assoc_idx)
{
struct mwifiex_ie_list *ap_custom_ie;
u8 *pos;
u16 len;
int ret;
ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
if (!ap_custom_ie)
return -ENOMEM;
ap_custom_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
pos = (u8 *)ap_custom_ie->ie_list;
if (beacon_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(beacon_ie->ie_length);
memcpy(pos, beacon_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (pr_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
memcpy(pos, pr_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (ar_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(ar_ie->ie_length);
memcpy(pos, ar_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
pos = (u8 *)(&ap_custom_ie->ie_list[0].ie_index);
if (beacon_ie && *beacon_idx == MWIFIEX_AUTO_IDX_MASK) {
/* save beacon ie index after auto-indexing */
*beacon_idx = le16_to_cpu(ap_custom_ie->ie_list[0].ie_index);
len = sizeof(*beacon_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(beacon_ie->ie_length);
pos += len;
}
if (pr_ie && le16_to_cpu(pr_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK) {
/* save probe resp ie index after auto-indexing */
*probe_idx = *((u16 *)pos);
len = sizeof(*pr_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
pos += len;
}
if (ar_ie && le16_to_cpu(ar_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK)
/* save assoc resp ie index after auto-indexing */
*assoc_idx = *((u16 *)pos);
kfree(ap_custom_ie);
return ret;
}
/* This function checks if the vendor specified IE is present in passed buffer
* and copies it to mwifiex_ie structure.
* Function takes pointer to struct mwifiex_ie pointer as argument.
* If the vendor specified IE is present then memory is allocated for
* mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
* this memory.
*/
static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
struct mwifiex_ie **ie_ptr, u16 mask,
unsigned int oui, u8 oui_type)
{
struct ieee_types_header *vs_ie;
struct mwifiex_ie *ie = *ie_ptr;
const u8 *vendor_ie;
vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
if (vendor_ie) {
if (!*ie_ptr) {
*ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
GFP_KERNEL);
if (!*ie_ptr)
return -ENOMEM;
ie = *ie_ptr;
}
vs_ie = (struct ieee_types_header *)vendor_ie;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
ie->mgmt_subtype_mask = cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
*ie_ptr = ie;
return 0;
}
/* This function parses beacon IEs, probe response IEs, association response IEs
* from cfg80211_ap_settings->beacon and sets these IE to FW.
*/
static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL, *ar_ie = NULL;
u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
int ret = 0;
if (data->beacon_ies && data->beacon_ies_len) {
mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
&beacon_ie, MGMT_MASK_BEACON,
WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
&beacon_ie, MGMT_MASK_BEACON,
WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
}
if (data->proberesp_ies && data->proberesp_ies_len) {
mwifiex_update_vs_ie(data->proberesp_ies,
data->proberesp_ies_len, &pr_ie,
MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->proberesp_ies,
data->proberesp_ies_len, &pr_ie,
MGMT_MASK_PROBE_RESP,
WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
}
if (data->assocresp_ies && data->assocresp_ies_len) {
mwifiex_update_vs_ie(data->assocresp_ies,
data->assocresp_ies_len, &ar_ie,
MGMT_MASK_ASSOC_RESP |
MGMT_MASK_REASSOC_RESP,
WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->assocresp_ies,
data->assocresp_ies_len, &ar_ie,
MGMT_MASK_ASSOC_RESP |
MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
WLAN_OUI_TYPE_WFA_P2P);
}
if (beacon_ie || pr_ie || ar_ie) {
ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
&beacon_idx, pr_ie,
&pr_idx, ar_ie, &ar_idx);
if (ret)
goto done;
}
priv->beacon_idx = beacon_idx;
priv->proberesp_idx = pr_idx;
priv->assocresp_idx = ar_idx;
done:
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
return ret;
}
/* This function parses head and tail IEs, from cfg80211_beacon_data and sets
* these IE to FW.
*/
static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *info)
{
struct mwifiex_ie *gen_ie;
struct ieee_types_header *hdr;
struct ieee80211_vendor_ie *vendorhdr;
u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
int left_len, parsed_len = 0;
unsigned int token_len;
int err = 0;
if (!info->tail || !info->tail_len)
return 0;
gen_ie = kzalloc(sizeof(*gen_ie), GFP_KERNEL);
if (!gen_ie)
return -ENOMEM;
left_len = info->tail_len;
/* Many IEs are generated in FW by parsing bss configuration.
* Let's not add them here; else we may end up duplicating these IEs
*/
while (left_len > sizeof(struct ieee_types_header)) {
hdr = (void *)(info->tail + parsed_len);
token_len = hdr->len + sizeof(struct ieee_types_header);
if (token_len > left_len) {
err = -EINVAL;
goto out;
}
switch (hdr->element_id) {
case WLAN_EID_SSID:
case WLAN_EID_SUPP_RATES:
case WLAN_EID_COUNTRY:
case WLAN_EID_PWR_CONSTRAINT:
case WLAN_EID_ERP_INFO:
case WLAN_EID_EXT_SUPP_RATES:
case WLAN_EID_HT_CAPABILITY:
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
break;
case WLAN_EID_VENDOR_SPECIFIC:
/* Skip only Microsoft WMM IE */
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
(const u8 *)hdr,
token_len))
break;
/* fall through */
default:
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
goto out;
}
memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
ie_len += token_len;
break;
}
left_len -= token_len;
parsed_len += token_len;
}
/* parse only WPA vendor IE from tail, WMM IE is configured by
* bss_config command
*/
vendorhdr = (void *)cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
info->tail, info->tail_len);
if (vendorhdr) {
token_len = vendorhdr->len + sizeof(struct ieee_types_header);
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
goto out;
}
memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
ie_len += token_len;
}
if (!ie_len)
goto out;
gen_ie->ie_index = cpu_to_le16(gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
MGMT_MASK_PROBE_RESP |
MGMT_MASK_ASSOC_RESP);
gen_ie->ie_length = cpu_to_le16(ie_len);
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
NULL, NULL)) {
err = -EINVAL;
goto out;
}
priv->gen_idx = gen_idx;
out:
kfree(gen_ie);
return err;
}
/* This function parses different IEs-head & tail IEs, beacon IEs,
* probe response IEs, association response IEs from cfg80211_ap_settings
* function and sets these IE to FW.
*/
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *info)
{
int ret;
ret = mwifiex_uap_parse_tail_ies(priv, info);
if (ret)
return ret;
return mwifiex_set_mgmt_beacon_data_ies(priv, info);
}
/* This function removes management IE set */
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
struct mwifiex_ie *ar_ie = NULL, *gen_ie = NULL;
int ret = 0;
if (priv->gen_idx != MWIFIEX_AUTO_IDX_MASK) {
gen_ie = kmalloc(sizeof(*gen_ie), GFP_KERNEL);
if (!gen_ie)
return -ENOMEM;
gen_ie->ie_index = cpu_to_le16(priv->gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
gen_ie->ie_length = 0;
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &priv->gen_idx,
NULL, &priv->proberesp_idx,
NULL, &priv->assocresp_idx)) {
ret = -1;
goto done;
}
priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
}
if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!beacon_ie) {
ret = -ENOMEM;
goto done;
}
beacon_ie->ie_index = cpu_to_le16(priv->beacon_idx);
beacon_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
beacon_ie->ie_length = 0;
}
if (priv->proberesp_idx != MWIFIEX_AUTO_IDX_MASK) {
pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!pr_ie) {
ret = -ENOMEM;
goto done;
}
pr_ie->ie_index = cpu_to_le16(priv->proberesp_idx);
pr_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
pr_ie->ie_length = 0;
}
if (priv->assocresp_idx != MWIFIEX_AUTO_IDX_MASK) {
ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!ar_ie) {
ret = -ENOMEM;
goto done;
}
ar_ie->ie_index = cpu_to_le16(priv->assocresp_idx);
ar_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
ar_ie->ie_length = 0;
}
if (beacon_ie || pr_ie || ar_ie)
ret = mwifiex_update_uap_custom_ie(priv,
beacon_ie, &priv->beacon_idx,
pr_ie, &priv->proberesp_idx,
ar_ie, &priv->assocresp_idx);
done:
kfree(gen_ie);
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_999_0 |
crossvul-cpp_data_bad_4522_0 | /*
* Copyright (c) 2003 Sun Microsystems, Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistribution of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistribution in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of Sun Microsystems, Inc. or the names of
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED.
* SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE
* FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING
* OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL
* SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA,
* OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
* PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF
* LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE,
* EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <ipmitool/ipmi.h>
#include <ipmitool/log.h>
#include <ipmitool/helper.h>
#include <ipmitool/ipmi_cc.h>
#include <ipmitool/ipmi_intf.h>
#include <ipmitool/ipmi_fru.h>
#include <ipmitool/ipmi_mc.h>
#include <ipmitool/ipmi_sdr.h>
#include <ipmitool/ipmi_strings.h> /* IANA id strings */
#include <ipmitool/ipmi_time.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#if HAVE_CONFIG_H
# include <config.h>
#endif
#define FRU_MULTIREC_CHUNK_SIZE (255 + sizeof(struct fru_multirec_header))
#define FRU_FIELD_VALID(a) (a && a[0])
static const char *section_id[4] = {
"Internal Use Section",
"Chassis Section",
"Board Section",
"Product Section"
};
static const char * combined_voltage_desc[] = {
"12 V",
"-12 V",
"5 V",
"3.3 V"
};
static const char * chassis_type_desc[] = {
"Unspecified",
"Other",
"Unknown",
"Desktop",
"Low Profile Desktop",
"Pizza Box",
"Mini Tower",
"Tower",
"Portable",
"LapTop",
"Notebook",
"Hand Held",
"Docking Station",
"All in One",
"Sub Notebook",
"Space-saving",
"Lunch Box",
"Main Server Chassis",
"Expansion Chassis",
"SubChassis",
"Bus Expansion Chassis",
"Peripheral Chassis",
"RAID Chassis",
"Rack Mount Chassis",
"Sealed-case PC",
"Multi-system Chassis",
"CompactPCI",
"AdvancedTCA",
"Blade",
"Blade Enclosure"
};
static inline bool fru_cc_rq2big(int code) {
return (code == IPMI_CC_REQ_DATA_INV_LENGTH
|| code == IPMI_CC_REQ_DATA_FIELD_EXCEED
|| code == IPMI_CC_CANT_RET_NUM_REQ_BYTES);
}
/* From lib/dimm_spd.c: */
int
ipmi_spd_print_fru(struct ipmi_intf * intf, uint8_t id);
extern int verbose;
static void ipmi_fru_read_to_bin(struct ipmi_intf * intf, char * pFileName, uint8_t fruId);
static void ipmi_fru_write_from_bin(struct ipmi_intf * intf, char * pFileName, uint8_t fruId);
static int ipmi_fru_upg_ekeying(struct ipmi_intf * intf, char * pFileName, uint8_t fruId);
static int ipmi_fru_get_multirec_location_from_fru(struct ipmi_intf * intf, uint8_t fruId,
struct fru_info *pFruInfo, uint32_t * pRetLocation,
uint32_t * pRetSize);
static int ipmi_fru_get_multirec_from_file(char * pFileName, uint8_t * pBufArea,
uint32_t size, uint32_t offset);
static int ipmi_fru_get_multirec_size_from_file(char * pFileName, uint32_t * pSize, uint32_t * pOffset);
int ipmi_fru_get_adjust_size_from_buffer(uint8_t *pBufArea, uint32_t *pSize);
static void ipmi_fru_picmg_ext_print(uint8_t * fru_data, int off, int length);
static int ipmi_fru_set_field_string(struct ipmi_intf * intf, unsigned
char fruId, uint8_t f_type, uint8_t f_index, char *f_string);
static int
ipmi_fru_set_field_string_rebuild(struct ipmi_intf * intf, uint8_t fruId,
struct fru_info fru, struct fru_header header,
uint8_t f_type, uint8_t f_index, char *f_string);
static void
fru_area_print_multirec_bloc(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset);
int
read_fru_area(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint32_t offset, uint32_t length, uint8_t *frubuf);
void free_fru_bloc(t_ipmi_fru_bloc *bloc);
/* get_fru_area_str - Parse FRU area string from raw data
*
* @data: raw FRU data
* @offset: offset into data for area
*
* returns pointer to FRU area string
*/
char * get_fru_area_str(uint8_t * data, uint32_t * offset)
{
static const char bcd_plus[] = "0123456789 -.:,_";
char * str;
int len, off, size, i, j, k, typecode, char_idx;
union {
uint32_t bits;
char chars[4];
} u;
size = 0;
off = *offset;
/* bits 6:7 contain format */
typecode = ((data[off] & 0xC0) >> 6);
// printf("Typecode:%i\n", typecode);
/* bits 0:5 contain length */
len = data[off++];
len &= 0x3f;
switch (typecode) {
case 0: /* 00b: binary/unspecified */
case 1: /* 01b: BCD plus */
/* hex dump or BCD -> 2x length */
size = (len * 2);
break;
case 2: /* 10b: 6-bit ASCII */
/* 4 chars per group of 1-3 bytes */
size = (((len * 4 + 2) / 3) & ~3);
break;
case 3: /* 11b: 8-bit ASCII */
/* no length adjustment */
size = len;
break;
}
if (size < 1) {
*offset = off;
return NULL;
}
str = malloc(size+1);
if (!str)
return NULL;
memset(str, 0, size+1);
if (size == 0) {
str[0] = '\0';
*offset = off;
return str;
}
switch (typecode) {
case 0: /* Binary */
strncpy(str, buf2str(&data[off], len), size);
break;
case 1: /* BCD plus */
for (k = 0; k < size; k++)
str[k] = bcd_plus[((data[off + k / 2] >> ((k % 2) ? 0 : 4)) & 0x0f)];
str[k] = '\0';
break;
case 2: /* 6-bit ASCII */
for (i = j = 0; i < len; i += 3) {
u.bits = 0;
k = ((len - i) < 3 ? (len - i) : 3);
#if WORDS_BIGENDIAN
u.chars[3] = data[off+i];
u.chars[2] = (k > 1 ? data[off+i+1] : 0);
u.chars[1] = (k > 2 ? data[off+i+2] : 0);
char_idx = 3;
#else
memcpy((void *)&u.bits, &data[off+i], k);
char_idx = 0;
#endif
for (k=0; k<4; k++) {
str[j++] = ((u.chars[char_idx] & 0x3f) + 0x20);
u.bits >>= 6;
}
}
str[j] = '\0';
break;
case 3:
memcpy(str, &data[off], size);
str[size] = '\0';
break;
}
off += len;
*offset = off;
return str;
}
/* is_valid_filename - checks file/path supplied by user
*
* input_filename - user input string
*
* returns 0 if path is ok
* returns -1 if path is NULL
* returns -2 if path is too short
* returns -3 if path is too long
*/
int
is_valid_filename(const char *input_filename)
{
if (!input_filename) {
lprintf(LOG_ERR, "ERROR: NULL pointer passed.");
return -1;
}
if (strlen(input_filename) < 1) {
lprintf(LOG_ERR, "File/path is invalid.");
return -2;
}
if (strlen(input_filename) >= 512) {
lprintf(LOG_ERR, "File/path must be shorter than 512 bytes.");
return -3;
}
return 0;
} /* is_valid_filename() */
/* build_fru_bloc - build fru bloc for write protection
*
* @intf: ipmi interface
* @fru_info: information about FRU device
* @id : Fru id
* @soffset : Source offset (from buffer)
* @doffset : Destination offset (in device)
* @length : Size of data to write (in bytes)
* @pFrubuf : Pointer on data to write
*
* returns 0 on success
* returns -1 on error
*/
#define FRU_NUM_BLOC_COMMON_HEADER 6
t_ipmi_fru_bloc *
build_fru_bloc(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id)
{
t_ipmi_fru_bloc * p_first, * p_bloc, * p_new;
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_header header;
struct fru_multirec_header rec_hdr;
uint8_t msg_data[4];
uint32_t off;
uint16_t i;
/*
* get COMMON Header format
*/
msg_data[0] = id;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_ERR, " Device not present (No Response)");
return NULL;
}
if (rsp->ccode) {
lprintf(LOG_ERR," Device not present (%s)",
val2str(rsp->ccode, completion_code_vals));
return NULL;
}
if (verbose > 1) {
printbuf(rsp->data, rsp->data_len, "FRU DATA");
}
memcpy(&header, rsp->data + 1, 8);
/* verify header checksum */
if (ipmi_csum((uint8_t *)&header, 8)) {
lprintf(LOG_ERR, " Bad header checksum");
return NULL;
}
if (header.version != 1) {
lprintf(LOG_ERR, " Unknown FRU header version 0x%02x", header.version);
return NULL;
}
/******************************************
Malloc and fill up the bloc contents
*******************************************/
// Common header
p_first = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_first) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return NULL;
}
p_bloc = p_first;
p_bloc->next = NULL;
p_bloc->start= 0;
p_bloc->size = fru->size;
strcpy((char *)p_bloc->blocId, "Common Header Section");
for (i = 0; i < 4; i++) {
if (header.offsets[i]) {
p_new = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_new) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
free_fru_bloc(p_first);
return NULL;
}
p_new->next = NULL;
p_new->start = header.offsets[i] * 8;
p_new->size = fru->size - p_new->start;
strncpy((char *)p_new->blocId, section_id[i], sizeof(p_new->blocId));
/* Make sure string is null terminated */
p_new->blocId[sizeof(p_new->blocId)-1] = 0;
p_bloc->next = p_new;
p_bloc->size = p_new->start - p_bloc->start;
p_bloc = p_new;
}
}
// Multi
if (header.offset.multi) {
off = header.offset.multi * 8;
do {
/*
* check for odd offset for the case of fru devices
* accessed by words
*/
if (fru->access && (off & 1)) {
lprintf(LOG_ERR, " Unaligned offset for a block: %d", off);
/* increment offset */
off++;
break;
}
if (read_fru_area(intf, fru, id, off, 5,
(uint8_t *) &rec_hdr) < 0) {
break;
}
p_new = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_new) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
free_fru_bloc(p_first);
return NULL;
}
p_new->next = NULL;
p_new->start = off;
p_new->size = fru->size - p_new->start;
sprintf((char *)p_new->blocId, "Multi-Rec Area: Type %i",
rec_hdr.type);
p_bloc->next = p_new;
p_bloc->size = p_new->start - p_bloc->start;
p_bloc = p_new;
off += rec_hdr.len + sizeof(struct fru_multirec_header);
/* verify record header */
if (ipmi_csum((uint8_t *)&rec_hdr,
sizeof(struct fru_multirec_header))) {
/* can't reliably judge for the rest space */
break;
}
} while (!(rec_hdr.format & 0x80) && (off < fru->size));
lprintf(LOG_DEBUG,"Multi-Record area ends at: %i (%xh)", off, off);
if (fru->size > off) {
// Bloc for remaining space
p_new = malloc(sizeof(struct ipmi_fru_bloc));
if (!p_new) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
free_fru_bloc(p_first);
return NULL;
}
p_new->next = NULL;
p_new->start = off;
p_new->size = fru->size - p_new->start;
strcpy((char *)p_new->blocId, "Unused space");
p_bloc->next = p_new;
p_bloc->size = p_new->start - p_bloc->start;
}
}
/* Dump blocs */
for(p_bloc = p_first, i = 0; p_bloc; p_bloc = p_bloc->next) {
lprintf(LOG_DEBUG ,"Bloc Numb : %i", i++);
lprintf(LOG_DEBUG ,"Bloc Id : %s", p_bloc->blocId);
lprintf(LOG_DEBUG ,"Bloc Start: %i", p_bloc->start);
lprintf(LOG_DEBUG ,"Bloc Size : %i", p_bloc->size);
lprintf(LOG_DEBUG ,"");
}
return p_first;
}
void
free_fru_bloc(t_ipmi_fru_bloc *bloc)
{
t_ipmi_fru_bloc * del;
while (bloc) {
del = bloc;
bloc = bloc->next;
free_n(&del);
}
}
/* By how many bytes to reduce a write command on a size failure. */
#define FRU_BLOCK_SZ 8
/* Baseline for a large enough piece to reduce via steps instead of bytes. */
#define FRU_AREA_MAXIMUM_BLOCK_SZ 32
/*
* write FRU[doffset:length] from the pFrubuf[soffset:length]
* rc=1 on success
**/
int
write_fru_area(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint16_t soffset, uint16_t doffset,
uint16_t length, uint8_t *pFrubuf)
{
uint16_t tmp, finish;
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[255+3];
uint16_t writeLength;
uint16_t found_bloc = 0;
finish = doffset + length; /* destination offset */
if (finish > fru->size)
{
lprintf(LOG_ERROR, "Return error");
return -1;
}
if (fru->access && ((doffset & 1) || (length & 1))) {
lprintf(LOG_ERROR, "Odd offset or length specified");
return -1;
}
t_ipmi_fru_bloc * fru_bloc = build_fru_bloc(intf, fru, id);
t_ipmi_fru_bloc * saved_fru_bloc = fru_bloc;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = SET_FRU_DATA;
req.msg.data = msg_data;
/* initialize request size only once */
if (fru->max_write_size == 0) {
uint16_t max_rq_size = ipmi_intf_get_max_request_data_size(intf);
/* validate lower bound of the maximum request data size */
if (max_rq_size <= 3) {
lprintf(LOG_ERROR, "Maximum request size is too small to send "
"a write request");
return -1;
}
/*
* Write FRU Info command returns the number of written bytes in
* a single byte field.
*/
if (max_rq_size - 3 > 255) {
/* Limit the max write size with 255 bytes. */
fru->max_write_size = 255;
} else {
/* subtract 1 byte for FRU ID an 2 bytes for offset */
fru->max_write_size = max_rq_size - 3;
}
/* check word access */
if (fru->access) {
fru->max_write_size &= ~1;
}
}
do {
uint16_t end_bloc;
uint8_t protected_bloc = 0;
/* Write per bloc, try to find the end of a bloc*/
while (fru_bloc && fru_bloc->start + fru_bloc->size <= doffset) {
fru_bloc = fru_bloc->next;
found_bloc++;
}
if (fru_bloc && fru_bloc->start + fru_bloc->size < finish) {
end_bloc = fru_bloc->start + fru_bloc->size;
} else {
end_bloc = finish;
}
/* calculate write length */
tmp = end_bloc - doffset;
/* check that write length is more than maximum request size */
if (tmp > fru->max_write_size) {
writeLength = fru->max_write_size;
} else {
writeLength = tmp;
}
/* copy fru data */
memcpy(&msg_data[3], pFrubuf + soffset, writeLength);
/* check word access */
if (fru->access) {
writeLength &= ~1;
}
tmp = doffset;
if (fru->access) {
tmp >>= 1;
}
msg_data[0] = id;
msg_data[1] = (uint8_t)tmp;
msg_data[2] = (uint8_t)(tmp >> 8);
req.msg.data_len = writeLength + 3;
if(fru_bloc) {
lprintf(LOG_INFO,"Writing %d bytes (Bloc #%i: %s)",
writeLength, found_bloc, fru_bloc->blocId);
} else {
lprintf(LOG_INFO,"Writing %d bytes", writeLength);
}
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
break;
}
if (fru_cc_rq2big(rsp->ccode)) {
if (fru->max_write_size > FRU_AREA_MAXIMUM_BLOCK_SZ) {
fru->max_write_size -= FRU_BLOCK_SZ;
lprintf(LOG_INFO, "Retrying FRU write with request size %d",
fru->max_write_size);
continue;
}
} else if (rsp->ccode == IPMI_CC_FRU_WRITE_PROTECTED_OFFSET) {
rsp->ccode = IPMI_CC_OK;
// Write protected section
protected_bloc = 1;
}
if (rsp->ccode)
break;
if (protected_bloc == 0) {
// Write OK, bloc not protected, continue
lprintf(LOG_INFO,"Wrote %d bytes", writeLength);
doffset += writeLength;
soffset += writeLength;
} else {
if(fru_bloc) {
// Bloc protected, advise user and jump over protected bloc
lprintf(LOG_INFO,
"Bloc [%s] protected at offset: %i (size %i bytes)",
fru_bloc->blocId, fru_bloc->start, fru_bloc->size);
lprintf(LOG_INFO,"Jumping over this bloc");
} else {
lprintf(LOG_INFO,
"Remaining FRU is protected following offset: %i",
doffset);
}
soffset += end_bloc - doffset;
doffset = end_bloc;
}
} while (doffset < finish);
if (saved_fru_bloc) {
free_fru_bloc(saved_fru_bloc);
}
return doffset >= finish;
}
/* read_fru_area - fill in frubuf[offset:length] from the FRU[offset:length]
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset into buffer
* @length: how much to read
* @frubuf: buffer read into
*
* returns -1 on error
* returns 0 if successful
*/
int
read_fru_area(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint32_t offset, uint32_t length, uint8_t *frubuf)
{
uint32_t off = offset, tmp, finish;
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[4];
if (offset > fru->size) {
lprintf(LOG_ERR, "Read FRU Area offset incorrect: %d > %d",
offset, fru->size);
return -1;
}
finish = offset + length;
if (finish > fru->size) {
finish = fru->size;
lprintf(LOG_NOTICE, "Read FRU Area length %d too large, "
"Adjusting to %d",
offset + length, finish - offset);
}
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
if (fru->max_read_size == 0) {
uint16_t max_rs_size = ipmi_intf_get_max_response_data_size(intf) - 1;
/* validate lower bound of the maximum response data size */
if (max_rs_size <= 1) {
lprintf(LOG_ERROR, "Maximum response size is too small to send "
"a read request");
return -1;
}
/*
* Read FRU Info command may read up to 255 bytes of data.
*/
if (max_rs_size - 1 > 255) {
/* Limit the max read size with 255 bytes. */
fru->max_read_size = 255;
} else {
/* subtract 1 byte for bytes count */
fru->max_read_size = max_rs_size - 1;
}
/* check word access */
if (fru->access) {
fru->max_read_size &= ~1;
}
}
do {
tmp = fru->access ? off >> 1 : off;
msg_data[0] = id;
msg_data[1] = (uint8_t)(tmp & 0xff);
msg_data[2] = (uint8_t)(tmp >> 8);
tmp = finish - off;
if (tmp > fru->max_read_size)
msg_data[3] = (uint8_t)fru->max_read_size;
else
msg_data[3] = (uint8_t)tmp;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_NOTICE, "FRU Read failed");
break;
}
if (rsp->ccode) {
/* if we get C7h or C8h or CAh return code then we requested too
* many bytes at once so try again with smaller size */
if (fru_cc_rq2big(rsp->ccode)
&& fru->max_read_size > FRU_BLOCK_SZ)
{
if (fru->max_read_size > FRU_AREA_MAXIMUM_BLOCK_SZ) {
/* subtract read length more aggressively */
fru->max_read_size -= FRU_BLOCK_SZ;
} else {
/* subtract length less aggressively */
fru->max_read_size--;
}
lprintf(LOG_INFO, "Retrying FRU read with request size %d",
fru->max_read_size);
continue;
}
lprintf(LOG_NOTICE, "FRU Read failed: %s",
val2str(rsp->ccode, completion_code_vals));
break;
}
tmp = fru->access ? rsp->data[0] << 1 : rsp->data[0];
memcpy(frubuf, rsp->data + 1, tmp);
off += tmp;
frubuf += tmp;
/* sometimes the size returned in the Info command
* is too large. return 0 so higher level function
* still attempts to parse what was returned */
if (tmp == 0 && off < finish) {
return 0;
}
} while (off < finish);
if (off < finish) {
return -1;
}
return 0;
}
/* read_fru_area - fill in frubuf[offset:length] from the FRU[offset:length]
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset into buffer
* @length: how much to read
* @frubuf: buffer read into
*
* returns -1 on error
* returns 0 if successful
*/
int
read_fru_area_section(struct ipmi_intf * intf, struct fru_info *fru, uint8_t id,
uint32_t offset, uint32_t length, uint8_t *frubuf)
{
static uint32_t fru_data_rqst_size = 20;
uint32_t off = offset, tmp, finish;
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[4];
if (offset > fru->size) {
lprintf(LOG_ERR, "Read FRU Area offset incorrect: %d > %d",
offset, fru->size);
return -1;
}
finish = offset + length;
if (finish > fru->size) {
finish = fru->size;
lprintf(LOG_NOTICE, "Read FRU Area length %d too large, "
"Adjusting to %d",
offset + length, finish - offset);
}
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
#ifdef LIMIT_ALL_REQUEST_SIZE
if (fru_data_rqst_size > 16)
#else
if (fru->access && fru_data_rqst_size > 16)
#endif
fru_data_rqst_size = 16;
do {
tmp = fru->access ? off >> 1 : off;
msg_data[0] = id;
msg_data[1] = (uint8_t)(tmp & 0xff);
msg_data[2] = (uint8_t)(tmp >> 8);
tmp = finish - off;
if (tmp > fru_data_rqst_size)
msg_data[3] = (uint8_t)fru_data_rqst_size;
else
msg_data[3] = (uint8_t)tmp;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_NOTICE, "FRU Read failed");
break;
}
if (rsp->ccode) {
/* if we get C7 or C8 or CA return code then we requested too
* many bytes at once so try again with smaller size */
if (fru_cc_rq2big(rsp->ccode) && (--fru_data_rqst_size > FRU_BLOCK_SZ)) {
lprintf(LOG_INFO,
"Retrying FRU read with request size %d",
fru_data_rqst_size);
continue;
}
lprintf(LOG_NOTICE, "FRU Read failed: %s",
val2str(rsp->ccode, completion_code_vals));
break;
}
tmp = fru->access ? rsp->data[0] << 1 : rsp->data[0];
memcpy((frubuf + off)-offset, rsp->data + 1, tmp);
off += tmp;
/* sometimes the size returned in the Info command
* is too large. return 0 so higher level function
* still attempts to parse what was returned */
if (tmp == 0 && off < finish)
return 0;
} while (off < finish);
if (off < finish)
return -1;
return 0;
}
static void
fru_area_print_multirec_bloc(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
uint8_t * fru_data = NULL;
uint32_t i;
struct fru_multirec_header * h;
uint32_t last_off, len;
i = last_off = offset;
fru_data = malloc(fru->size + 1);
if (!fru_data) {
lprintf(LOG_ERR, " Out of memory!");
return;
}
memset(fru_data, 0, fru->size + 1);
do {
h = (struct fru_multirec_header *) (fru_data + i);
// read area in (at most) FRU_MULTIREC_CHUNK_SIZE bytes at a time
if ((last_off < (i + sizeof(*h))) || (last_off < (i + h->len)))
{
len = fru->size - last_off;
if (len > FRU_MULTIREC_CHUNK_SIZE)
len = FRU_MULTIREC_CHUNK_SIZE;
if (read_fru_area(intf, fru, id, last_off, len, fru_data) < 0)
break;
last_off += len;
}
//printf("Bloc Numb : %i\n", counter);
printf("Bloc Start: %i\n", i);
printf("Bloc Size : %i\n", h->len);
printf("\n");
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80));
i = offset;
do {
h = (struct fru_multirec_header *) (fru_data + i);
printf("Bloc Start: %i\n", i);
printf("Bloc Size : %i\n", h->len);
printf("\n");
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80));
lprintf(LOG_DEBUG ,"Multi-Record area ends at: %i (%xh)",i,i);
free_n(&fru_data);
}
/* fru_area_print_chassis - Print FRU Chassis Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_chassis(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
char * fru_area;
uint8_t * fru_data;
uint32_t fru_len, i;
uint8_t tmp[2];
size_t chassis_type;
fru_len = 0;
/* read enough to check length field */
if (read_fru_area(intf, fru, id, offset, 2, tmp) == 0) {
fru_len = 8 * tmp[1];
}
if (fru_len == 0) {
return;
}
fru_data = malloc(fru_len);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, fru_len);
/* read in the full fru */
if (read_fru_area(intf, fru, id, offset, fru_len, fru_data) < 0) {
free_n(&fru_data);
return;
}
/*
* skip first two bytes which specify
* fru area version and fru area length
*/
i = 2;
chassis_type = (fru_data[i] > ARRAY_SIZE(chassis_type_desc) - 1)
? 2
: fru_data[i];
printf(" Chassis Type : %s\n", chassis_type_desc[chassis_type]);
i++;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Chassis Part Number : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Chassis Serial : %s\n", fru_area);
}
free_n(&fru_area);
}
/* read any extra fields */
while ((i < fru_len) && (fru_data[i] != FRU_END_OF_FIELDS)) {
int j = i;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Chassis Extra : %s\n", fru_area);
}
free_n(&fru_area);
}
if (i == j) {
break;
}
}
free_n(&fru_data);
}
/* fru_area_print_board - Print FRU Board Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_board(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
char * fru_area;
uint8_t * fru_data;
uint32_t fru_len;
uint32_t i;
time_t ts;
uint8_t tmp[2];
fru_len = 0;
/* read enough to check length field */
if (read_fru_area(intf, fru, id, offset, 2, tmp) == 0) {
fru_len = 8 * tmp[1];
}
if (fru_len <= 0) {
return;
}
fru_data = malloc(fru_len);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, fru_len);
/* read in the full fru */
if (read_fru_area(intf, fru, id, offset, fru_len, fru_data) < 0) {
free_n(&fru_data);
return;
}
/*
* skip first three bytes which specify
* fru area version, fru area length
* and fru board language
*/
i = 3;
ts = ipmi_fru2time_t(&fru_data[i]);
printf(" Board Mfg Date : %s\n", ipmi_timestamp_string(ts));
i += 3; /* skip mfg. date time */
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Mfg : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Product : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Serial : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Part Number : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0 && verbose > 0) {
printf(" Board FRU ID : %s\n", fru_area);
}
free_n(&fru_area);
}
/* read any extra fields */
while ((i < fru_len) && (fru_data[i] != FRU_END_OF_FIELDS)) {
int j = i;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Board Extra : %s\n", fru_area);
}
free_n(&fru_area);
}
if (i == j)
break;
}
free_n(&fru_data);
}
/* fru_area_print_product - Print FRU Product Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_product(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
char * fru_area;
uint8_t * fru_data;
uint32_t fru_len, i;
uint8_t tmp[2];
fru_len = 0;
/* read enough to check length field */
if (read_fru_area(intf, fru, id, offset, 2, tmp) == 0) {
fru_len = 8 * tmp[1];
}
if (fru_len == 0) {
return;
}
fru_data = malloc(fru_len);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, fru_len);
/* read in the full fru */
if (read_fru_area(intf, fru, id, offset, fru_len, fru_data) < 0) {
free_n(&fru_data);
return;
}
/*
* skip first three bytes which specify
* fru area version, fru area length
* and fru board language
*/
i = 3;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Manufacturer : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Name : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Part Number : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Version : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Serial : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Asset Tag : %s\n", fru_area);
}
free_n(&fru_area);
}
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0 && verbose > 0) {
printf(" Product FRU ID : %s\n", fru_area);
}
free_n(&fru_area);
}
/* read any extra fields */
while ((i < fru_len) && (fru_data[i] != FRU_END_OF_FIELDS)) {
int j = i;
fru_area = get_fru_area_str(fru_data, &i);
if (fru_area) {
if (strlen(fru_area) > 0) {
printf(" Product Extra : %s\n", fru_area);
}
free_n(&fru_area);
}
if (i == j)
break;
}
free_n(&fru_data);
}
/* fru_area_print_multirec - Print FRU Multi Record Area
*
* @intf: ipmi interface
* @fru: fru info
* @id: fru id
* @offset: offset pointer
*/
static void
fru_area_print_multirec(struct ipmi_intf * intf, struct fru_info * fru,
uint8_t id, uint32_t offset)
{
uint8_t * fru_data;
struct fru_multirec_header * h;
struct fru_multirec_powersupply * ps;
struct fru_multirec_dcoutput * dc;
struct fru_multirec_dcload * dl;
uint16_t peak_capacity;
uint8_t peak_hold_up_time;
uint32_t last_off;
last_off = offset;
fru_data = malloc(FRU_MULTIREC_CHUNK_SIZE);
if (!fru_data) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
return;
}
memset(fru_data, 0, FRU_MULTIREC_CHUNK_SIZE);
h = (struct fru_multirec_header *) (fru_data);
do {
if (read_fru_area(intf, fru, id, last_off, sizeof(*h), fru_data) < 0) {
break;
}
if (h->len && read_fru_area(intf, fru, id,
last_off + sizeof(*h), h->len, fru_data + sizeof(*h)) < 0) {
break;
}
last_off += h->len + sizeof(*h);
switch (h->type) {
case FRU_RECORD_TYPE_POWER_SUPPLY_INFORMATION:
ps = (struct fru_multirec_powersupply *)
(fru_data + sizeof(struct fru_multirec_header));
#if WORDS_BIGENDIAN
ps->capacity = BSWAP_16(ps->capacity);
ps->peak_va = BSWAP_16(ps->peak_va);
ps->lowend_input1 = BSWAP_16(ps->lowend_input1);
ps->highend_input1 = BSWAP_16(ps->highend_input1);
ps->lowend_input2 = BSWAP_16(ps->lowend_input2);
ps->highend_input2 = BSWAP_16(ps->highend_input2);
ps->combined_capacity = BSWAP_16(ps->combined_capacity);
ps->peak_cap_ht = BSWAP_16(ps->peak_cap_ht);
#endif
peak_hold_up_time = (ps->peak_cap_ht & 0xf000) >> 12;
peak_capacity = ps->peak_cap_ht & 0x0fff;
printf (" Power Supply Record\n");
printf (" Capacity : %d W\n",
ps->capacity);
printf (" Peak VA : %d VA\n",
ps->peak_va);
printf (" Inrush Current : %d A\n",
ps->inrush_current);
printf (" Inrush Interval : %d ms\n",
ps->inrush_interval);
printf (" Input Voltage Range 1 : %d-%d V\n",
ps->lowend_input1 / 100, ps->highend_input1 / 100);
printf (" Input Voltage Range 2 : %d-%d V\n",
ps->lowend_input2 / 100, ps->highend_input2 / 100);
printf (" Input Frequency Range : %d-%d Hz\n",
ps->lowend_freq, ps->highend_freq);
printf (" A/C Dropout Tolerance : %d ms\n",
ps->dropout_tolerance);
printf (" Flags : %s%s%s%s%s\n",
ps->predictive_fail ? "'Predictive fail' " : "",
ps->pfc ? "'Power factor correction' " : "",
ps->autoswitch ? "'Autoswitch voltage' " : "",
ps->hotswap ? "'Hot swap' " : "",
ps->predictive_fail ? ps->rps_threshold ?
ps->tach ? "'Two pulses per rotation'" : "'One pulse per rotation'" :
ps->tach ? "'Failure on pin de-assertion'" : "'Failure on pin assertion'" : "");
printf (" Peak capacity : %d W\n",
peak_capacity);
printf (" Peak capacity holdup : %d s\n",
peak_hold_up_time);
if (ps->combined_capacity == 0)
printf (" Combined capacity : not specified\n");
else
printf (" Combined capacity : %d W (%s and %s)\n",
ps->combined_capacity,
combined_voltage_desc [ps->combined_voltage1],
combined_voltage_desc [ps->combined_voltage2]);
if (ps->predictive_fail)
printf (" Fan lower threshold : %d RPS\n",
ps->rps_threshold);
break;
case FRU_RECORD_TYPE_DC_OUTPUT:
dc = (struct fru_multirec_dcoutput *)
(fru_data + sizeof(struct fru_multirec_header));
#if WORDS_BIGENDIAN
dc->nominal_voltage = BSWAP_16(dc->nominal_voltage);
dc->max_neg_dev = BSWAP_16(dc->max_neg_dev);
dc->max_pos_dev = BSWAP_16(dc->max_pos_dev);
dc->ripple_and_noise = BSWAP_16(dc->ripple_and_noise);
dc->min_current = BSWAP_16(dc->min_current);
dc->max_current = BSWAP_16(dc->max_current);
#endif
printf (" DC Output Record\n");
printf (" Output Number : %d\n",
dc->output_number);
printf (" Standby power : %s\n",
dc->standby ? "Yes" : "No");
printf (" Nominal voltage : %.2f V\n",
(double) dc->nominal_voltage / 100);
printf (" Max negative deviation : %.2f V\n",
(double) dc->max_neg_dev / 100);
printf (" Max positive deviation : %.2f V\n",
(double) dc->max_pos_dev / 100);
printf (" Ripple and noise pk-pk : %d mV\n",
dc->ripple_and_noise);
printf (" Minimum current draw : %.3f A\n",
(double) dc->min_current / 1000);
printf (" Maximum current draw : %.3f A\n",
(double) dc->max_current / 1000);
break;
case FRU_RECORD_TYPE_DC_LOAD:
dl = (struct fru_multirec_dcload *)
(fru_data + sizeof(struct fru_multirec_header));
#if WORDS_BIGENDIAN
dl->nominal_voltage = BSWAP_16(dl->nominal_voltage);
dl->min_voltage = BSWAP_16(dl->min_voltage);
dl->max_voltage = BSWAP_16(dl->max_voltage);
dl->ripple_and_noise = BSWAP_16(dl->ripple_and_noise);
dl->min_current = BSWAP_16(dl->min_current);
dl->max_current = BSWAP_16(dl->max_current);
#endif
printf (" DC Load Record\n");
printf (" Output Number : %d\n",
dl->output_number);
printf (" Nominal voltage : %.2f V\n",
(double) dl->nominal_voltage / 100);
printf (" Min voltage allowed : %.2f V\n",
(double) dl->min_voltage / 100);
printf (" Max voltage allowed : %.2f V\n",
(double) dl->max_voltage / 100);
printf (" Ripple and noise pk-pk : %d mV\n",
dl->ripple_and_noise);
printf (" Minimum current load : %.3f A\n",
(double) dl->min_current / 1000);
printf (" Maximum current load : %.3f A\n",
(double) dl->max_current / 1000);
break;
case FRU_RECORD_TYPE_OEM_EXTENSION:
{
struct fru_multirec_oem_header *oh=(struct fru_multirec_oem_header *)
&fru_data[sizeof(struct fru_multirec_header)];
uint32_t iana = oh->mfg_id[0] | oh->mfg_id[1]<<8 | oh->mfg_id[2]<<16;
/* Now makes sure this is really PICMG record */
if( iana == IPMI_OEM_PICMG ){
printf(" PICMG Extension Record\n");
ipmi_fru_picmg_ext_print(fru_data,
sizeof(struct fru_multirec_header),
h->len);
}
/* FIXME: Add OEM record support here */
else{
printf(" OEM (%s) Record\n", val2str( iana, ipmi_oem_info));
}
}
break;
}
} while (!(h->format & 0x80));
lprintf(LOG_DEBUG ,"Multi-Record area ends at: %i (%xh)", last_off, last_off);
free_n(&fru_data);
}
/* ipmi_fru_query_new_value - Query new values to replace original FRU content
*
* @data: FRU data
* @offset: offset of the bytes to be modified in data
* @len: size of the modified data
*
* returns : TRUE if data changed
* returns : FALSE if data not changed
*/
static
bool
ipmi_fru_query_new_value(uint8_t *data,int offset, size_t len)
{
bool status = false;
int ret;
char answer;
printf("Would you like to change this value <y/n> ? ");
ret = scanf("%c", &answer);
if (ret != 1) {
return false;
}
if( answer == 'y' || answer == 'Y' ){
int i;
unsigned int *holder;
holder = malloc(len);
printf(
"Enter hex values for each of the %d entries (lsb first), "
"hit <enter> between entries\n", (int)len);
/* I can't assign scanf' %x into a single char */
for( i=0;i<len;i++ ){
ret = scanf("%x", holder+i);
if (ret != 1) {
free_n(&holder);
return false;
}
}
for( i=0;i<len;i++ ){
data[offset++] = (unsigned char) *(holder+i);
}
/* &data[offset++] */
free_n(&holder);
status = true;
}
else{
printf("Entered %c\n",answer);
}
return status;
}
/* ipmi_fru_oemkontron_edit -
* Query new values to replace original FRU content
* This is a generic enough to support any type of 'OEM' record
* because the user supplies 'IANA number' , 'record Id' and 'record' version'
*
* However, the parser must have 'apriori' knowledge of the record format
* The currently supported record is :
*
* IANA : 15000 (Kontron)
* RECORD ID : 3
* RECORD VERSION: 0 (or 1)
*
* I would have like to put that stuff in an OEM specific file, but apart for
* the record format information, all commands are really standard 'FRU' command
*
*
* @data: FRU data
* @offset: start of the current multi record (start of header)
* @len: len of the current record (excluding header)
* @h: pointer to record header
* @oh: pointer to OEM /PICMG header
*
* returns: TRUE if data changed
* returns: FALSE if data not changed
*/
#define OEM_KONTRON_INFORMATION_RECORD 3
#define EDIT_OEM_KONTRON_COMPLETE_ARG_COUNT 12
#define GET_OEM_KONTRON_COMPLETE_ARG_COUNT 5
/*
./src/ipmitool fru edit 0
oem 15000 3 0 name instance FIELD1 FIELD2 FIELD3 crc32
*/
#define OEM_KONTRON_SUBCOMMAND_ARG_POS 2
#define OEM_KONTRON_IANA_ARG_POS 3
#define OEM_KONTRON_RECORDID_ARG_POS 4
#define OEM_KONTRON_FORMAT_ARG_POS 5
#define OEM_KONTRON_NAME_ARG_POS 6
#define OEM_KONTRON_INSTANCE_ARG_POS 7
#define OEM_KONTRON_VERSION_ARG_POS 8
#define OEM_KONTRON_BUILDDATE_ARG_POS 9
#define OEM_KONTRON_UPDATEDATE_ARG_POS 10
#define OEM_KONTRON_CRC32_ARG_POS 11
#define OEM_KONTRON_FIELD_SIZE 8
#define OEM_KONTRON_VERSION_FIELD_SIZE 10
#ifdef HAVE_PRAGMA_PACK
#pragma pack(1)
#endif
typedef struct OemKontronInformationRecordV0{
uint8_t field1TypeLength;
uint8_t field1[OEM_KONTRON_FIELD_SIZE];
uint8_t field2TypeLength;
uint8_t field2[OEM_KONTRON_FIELD_SIZE];
uint8_t field3TypeLength;
uint8_t field3[OEM_KONTRON_FIELD_SIZE];
uint8_t crcTypeLength;
uint8_t crc32[OEM_KONTRON_FIELD_SIZE];
}tOemKontronInformationRecordV0;
#ifdef HAVE_PRAGMA_PACK
#pragma pack(0)
#endif
#ifdef HAVE_PRAGMA_PACK
#pragma pack(1)
#endif
typedef struct OemKontronInformationRecordV1{
uint8_t field1TypeLength;
uint8_t field1[OEM_KONTRON_VERSION_FIELD_SIZE];
uint8_t field2TypeLength;
uint8_t field2[OEM_KONTRON_FIELD_SIZE];
uint8_t field3TypeLength;
uint8_t field3[OEM_KONTRON_FIELD_SIZE];
uint8_t crcTypeLength;
uint8_t crc32[OEM_KONTRON_FIELD_SIZE];
}tOemKontronInformationRecordV1;
#ifdef HAVE_PRAGMA_PACK
#pragma pack(0)
#endif
/*
./src/ipmitool fru get 0 oem iana 3
*/
static void ipmi_fru_oemkontron_get(int argc,
char ** argv,
uint8_t * fru_data,
int off,
struct fru_multirec_oem_header *oh)
{
static bool badParams = false;
int start = off;
int offset = start;
offset += sizeof(struct fru_multirec_oem_header);
if(!badParams){
/* the 'OEM' field is already checked in caller */
if( argc > OEM_KONTRON_SUBCOMMAND_ARG_POS ){
if(strncmp("oem", argv[OEM_KONTRON_SUBCOMMAND_ARG_POS],3)){
printf("usage: fru get <id> <oem>\n");
badParams = true;
return;
}
}
if( argc<GET_OEM_KONTRON_COMPLETE_ARG_COUNT ){
printf("usage: oem <iana> <recordid>\n");
printf("usage: oem 15000 3\n");
badParams = true;
return;
}
}
if (badParams) {
return;
}
if (oh->record_id != OEM_KONTRON_INFORMATION_RECORD) {
return;
}
uint8_t version;
printf("Kontron OEM Information Record\n");
version = oh->record_version;
uint8_t blockCount;
uint8_t blockIndex = 0;
uint8_t instance = 0;
if (str2uchar(argv[OEM_KONTRON_INSTANCE_ARG_POS], &instance) != 0) {
lprintf(LOG_ERR,
"Instance argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_INSTANCE_ARG_POS]);
badParams = true;
return;
}
blockCount = fru_data[offset++];
for (blockIndex = 0; blockIndex < blockCount; blockIndex++) {
void *pRecordData;
uint8_t nameLen;
nameLen = (fru_data[offset++] &= 0x3F);
printf(" Name: %*.*s\n", nameLen, nameLen,
(const char *)(fru_data + offset));
offset += nameLen;
pRecordData = &fru_data[offset];
printf(" Record Version: %d\n", version);
if (version == 0) {
printf(" Version: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->field1);
printf(" Build Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->field2);
printf(" Update Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->field3);
printf(" Checksum: %*.*s\n\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV0 *)pRecordData)->crc32);
offset += sizeof(tOemKontronInformationRecordV0);
offset++;
} else if (version == 1) {
printf(" Version: %*.*s\n",
OEM_KONTRON_VERSION_FIELD_SIZE,
OEM_KONTRON_VERSION_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->field1);
printf(" Build Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->field2);
printf(" Update Date: %*.*s\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->field3);
printf(" Checksum: %*.*s\n\n",
OEM_KONTRON_FIELD_SIZE,
OEM_KONTRON_FIELD_SIZE,
((tOemKontronInformationRecordV1 *)pRecordData)->crc32);
offset += sizeof(tOemKontronInformationRecordV1);
offset++;
} else {
printf(" Unsupported version %d\n", version);
}
}
}
static
bool
ipmi_fru_oemkontron_edit( int argc, char ** argv,uint8_t * fru_data,
int off,int len,
struct fru_multirec_header *h,
struct fru_multirec_oem_header *oh)
{
static bool badParams=false;
bool hasChanged = false;
int start = off;
int offset = start;
int length = len;
int i;
uint8_t record_id = 0;
offset += sizeof(struct fru_multirec_oem_header);
if(!badParams){
/* the 'OEM' field is already checked in caller */
if( argc > OEM_KONTRON_SUBCOMMAND_ARG_POS ){
if(strncmp("oem", argv[OEM_KONTRON_SUBCOMMAND_ARG_POS],3)){
printf("usage: fru edit <id> <oem> <args...>\n");
badParams = true;
return hasChanged;
}
}
if( argc<EDIT_OEM_KONTRON_COMPLETE_ARG_COUNT ){
printf("usage: oem <iana> <recordid> <format> <args...>\n");
printf("usage: oem 15000 3 0 <name> <instance> <field1>"\
" <field2> <field3> <crc32>\n");
badParams = true;
return hasChanged;
}
if (str2uchar(argv[OEM_KONTRON_RECORDID_ARG_POS], &record_id) != 0) {
lprintf(LOG_ERR,
"Record ID argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_RECORDID_ARG_POS]);
badParams = true;
return hasChanged;
}
if (record_id == OEM_KONTRON_INFORMATION_RECORD) {
for(i=OEM_KONTRON_VERSION_ARG_POS;i<=OEM_KONTRON_CRC32_ARG_POS;i++){
if( (strlen(argv[i]) != OEM_KONTRON_FIELD_SIZE) &&
(strlen(argv[i]) != OEM_KONTRON_VERSION_FIELD_SIZE)) {
printf("error: version fields must have %d characters\n",
OEM_KONTRON_FIELD_SIZE);
badParams = true;
return hasChanged;
}
}
}
}
if(!badParams){
if(oh->record_id == OEM_KONTRON_INFORMATION_RECORD ) {
uint8_t formatVersion = 0;
uint8_t version;
if (str2uchar(argv[OEM_KONTRON_FORMAT_ARG_POS], &formatVersion) != 0) {
lprintf(LOG_ERR,
"Format argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_FORMAT_ARG_POS]);
badParams = true;
return hasChanged;
}
printf(" Kontron OEM Information Record\n");
version = oh->record_version;
if( version == formatVersion ){
uint8_t blockCount;
uint8_t blockIndex=0;
uint8_t matchInstance = 0;
uint8_t instance = 0;
if (str2uchar(argv[OEM_KONTRON_INSTANCE_ARG_POS], &instance) != 0) {
lprintf(LOG_ERR,
"Instance argument '%s' is either invalid or out of range.",
argv[OEM_KONTRON_INSTANCE_ARG_POS]);
badParams = true;
return hasChanged;
}
blockCount = fru_data[offset++];
printf(" blockCount: %d\n",blockCount);
for(blockIndex=0;blockIndex<blockCount;blockIndex++){
void * pRecordData;
uint8_t nameLen;
nameLen = ( fru_data[offset++] & 0x3F );
if( version == 0 || version == 1 )
{
if(!strncmp((char *)argv[OEM_KONTRON_NAME_ARG_POS],
(const char *)(fru_data+offset),nameLen)&& (matchInstance == instance)){
printf ("Found : %s\n",argv[OEM_KONTRON_NAME_ARG_POS]);
offset+=nameLen;
pRecordData = &fru_data[offset];
if( version == 0 )
{
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->field1 ,
argv[OEM_KONTRON_VERSION_ARG_POS] ,
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->field2 ,
argv[OEM_KONTRON_BUILDDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->field3 ,
argv[OEM_KONTRON_UPDATEDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV0 *)
pRecordData)->crc32 ,
argv[OEM_KONTRON_CRC32_ARG_POS] ,
OEM_KONTRON_FIELD_SIZE);
}
else
{
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->field1 ,
argv[OEM_KONTRON_VERSION_ARG_POS] ,
OEM_KONTRON_VERSION_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->field2 ,
argv[OEM_KONTRON_BUILDDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->field3 ,
argv[OEM_KONTRON_UPDATEDATE_ARG_POS],
OEM_KONTRON_FIELD_SIZE);
memcpy( ((tOemKontronInformationRecordV1 *)
pRecordData)->crc32 ,
argv[OEM_KONTRON_CRC32_ARG_POS] ,
OEM_KONTRON_FIELD_SIZE);
}
matchInstance++;
hasChanged = true;
}
else if(!strncmp((char *)argv[OEM_KONTRON_NAME_ARG_POS],
(const char *)(fru_data+offset), nameLen)){
printf ("Skipped : %s [instance %d]\n",argv[OEM_KONTRON_NAME_ARG_POS],
(unsigned int)matchInstance);
matchInstance++;
offset+=nameLen;
}
else {
offset+=nameLen;
}
if( version == 0 )
{
offset+= sizeof(tOemKontronInformationRecordV0);
}
else
{
offset+= sizeof(tOemKontronInformationRecordV1);
}
offset++;
}
else
{
printf (" Unsupported version %d\n",version);
}
}
}
else{
printf(" Version: %d\n",version);
}
}
if( hasChanged ){
uint8_t record_checksum =0;
uint8_t header_checksum =0;
int index;
lprintf(LOG_DEBUG,"Initial record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Initial header checksum : %x",h->header_checksum);
for(index=0;index<length;index++){
record_checksum+= fru_data[start+index];
}
/* Update Record checksum */
h->record_checksum = ~record_checksum + 1;
for(index=0;index<(sizeof(struct fru_multirec_header) -1);index++){
uint8_t data= *( (uint8_t *)h+ index);
header_checksum+=data;
}
/* Update header checksum */
h->header_checksum = ~header_checksum + 1;
lprintf(LOG_DEBUG,"Final record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Final header checksum : %x",h->header_checksum);
/* write back data */
}
}
return hasChanged;
}
/* ipmi_fru_picmg_ext_edit - Query new values to replace original FRU content
*
* @data: FRU data
* @offset: start of the current multi record (start of header)
* @len: len of the current record (excluding header)
* @h: pointer to record header
* @oh: pointer to OEM /PICMG header
*
* returns: TRUE if data changed
* returns: FALSE if data not changed
*/
static
bool
ipmi_fru_picmg_ext_edit(uint8_t * fru_data,
int off,int len,
struct fru_multirec_header *h,
struct fru_multirec_oem_header *oh)
{
bool hasChanged = false;
int start = off;
int offset = start;
int length = len;
offset += sizeof(struct fru_multirec_oem_header);
switch (oh->record_id)
{
case FRU_AMC_ACTIVATION:
printf(" FRU_AMC_ACTIVATION\n");
{
int index=offset;
uint16_t max_current;
max_current = fru_data[offset];
max_current |= fru_data[++offset]<<8;
printf(" Maximum Internal Current(@12V): %.2f A (0x%02x)\n",
(float)max_current / 10.0f, max_current);
if( ipmi_fru_query_new_value(fru_data,index,2) ){
max_current = fru_data[index];
max_current |= fru_data[++index]<<8;
printf(" New Maximum Internal Current(@12V): %.2f A (0x%02x)\n",
(float)max_current / 10.0f, max_current);
hasChanged = true;
}
printf(" Module Activation Readiness: %i sec.\n", fru_data[++offset]);
printf(" Descriptor Count: %i\n", fru_data[++offset]);
printf("\n");
for (++offset;
offset < (off + length);
offset += sizeof(struct fru_picmgext_activation_record)) {
struct fru_picmgext_activation_record * a =
(struct fru_picmgext_activation_record *) &fru_data[offset];
printf(" IPMB-Address: 0x%x\n", a->ibmb_addr);
printf(" Max. Module Current: %.2f A\n", (float)a->max_module_curr / 10.0f);
printf("\n");
}
}
break;
case FRU_AMC_CURRENT:
printf(" FRU_AMC_CURRENT\n");
{
int index=offset;
unsigned char current;
current = fru_data[index];
printf(" Current draw(@12V): %.2f A (0x%02x)\n",
(float)current / 10.0f, current);
if( ipmi_fru_query_new_value(fru_data, index, 1) ){
current = fru_data[index];
printf(" New Current draw(@12V): %.2f A (0x%02x)\n",
(float)current / 10.0f, current);
hasChanged = true;
}
}
break;
}
if( hasChanged ){
uint8_t record_checksum =0;
uint8_t header_checksum =0;
int index;
lprintf(LOG_DEBUG,"Initial record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Initial header checksum : %x",h->header_checksum);
for(index=0;index<length;index++){
record_checksum+= fru_data[start+index];
}
/* Update Record checksum */
h->record_checksum = ~record_checksum + 1;
for(index=0;index<(sizeof(struct fru_multirec_header) -1);index++){
uint8_t data= *( (uint8_t *)h+ index);
header_checksum+=data;
}
/* Update header checksum */
h->header_checksum = ~header_checksum + 1;
lprintf(LOG_DEBUG,"Final record checksum : %x",h->record_checksum);
lprintf(LOG_DEBUG,"Final header checksum : %x",h->header_checksum);
/* write back data */
}
return hasChanged;
}
/* ipmi_fru_picmg_ext_print - prints OEM fru record (PICMG)
*
* @fru_data: FRU data
* @offset: offset of the bytes to be modified in data
* @length: size of the record
*
* returns : n/a
*/
static void ipmi_fru_picmg_ext_print(uint8_t * fru_data, int off, int length)
{
struct fru_multirec_oem_header *h;
int guid_count;
int offset = off;
int start_offset = off;
int i;
h = (struct fru_multirec_oem_header *) &fru_data[offset];
offset += sizeof(struct fru_multirec_oem_header);
switch (h->record_id)
{
case FRU_PICMG_BACKPLANE_P2P:
{
uint8_t index;
unsigned int data;
struct fru_picmgext_slot_desc *slot_d;
slot_d =
(struct fru_picmgext_slot_desc*)&fru_data[offset];
offset += sizeof(struct fru_picmgext_slot_desc);
printf(" FRU_PICMG_BACKPLANE_P2P\n");
while (offset <= (start_offset+length)) {
printf("\n");
printf(" Channel Type: ");
switch (slot_d->chan_type)
{
case 0x00:
case 0x07:
printf("PICMG 2.9\n");
break;
case 0x08:
printf("Single Port Fabric IF\n");
break;
case 0x09:
printf("Double Port Fabric IF\n");
break;
case 0x0a:
printf("Full Channel Fabric IF\n");
break;
case 0x0b:
printf("Base IF\n");
break;
case 0x0c:
printf("Update Channel IF\n");
break;
case 0x0d:
printf("ShMC Cross Connect\n");
break;
default:
printf("Unknown IF (0x%x)\n",
slot_d->chan_type);
break;
}
printf(" Slot Addr. : %02x\n",
slot_d->slot_addr );
printf(" Channel Count: %i\n",
slot_d->chn_count);
for (index = 0;
index < (slot_d->chn_count);
index++) {
struct fru_picmgext_chn_desc *d;
data = (fru_data[offset+0]) |
(fru_data[offset+1] << 8) |
(fru_data[offset+2] << 16);
d = (struct fru_picmgext_chn_desc *)&data;
if (verbose) {
printf( " "
"Chn: %02x -> "
"Chn: %02x in "
"Slot: %02x\n",
d->local_chn,
d->remote_chn,
d->remote_slot);
}
offset += FRU_PICMGEXT_CHN_DESC_RECORD_SIZE;
}
slot_d = (struct fru_picmgext_slot_desc*)&fru_data[offset];
offset += sizeof(struct fru_picmgext_slot_desc);
}
}
break;
case FRU_PICMG_ADDRESS_TABLE:
{
unsigned int hwaddr;
unsigned int sitetype;
unsigned int sitenum;
unsigned int entries;
unsigned int i;
char *picmg_site_type_strings[] = {
"AdvancedTCA Board",
"Power Entry",
"Shelf FRU Information",
"Dedicated ShMC",
"Fan Tray",
"Fan Filter Tray",
"Alarm",
"AdvancedMC Module",
"PMC",
"Rear Transition Module"};
printf(" FRU_PICMG_ADDRESS_TABLE\n");
printf(" Type/Len: 0x%02x\n", fru_data[offset++]);
printf(" Shelf Addr: ");
for (i=0;i<20;i++) {
printf("0x%02x ", fru_data[offset++]);
}
printf("\n");
entries = fru_data[offset++];
printf(" Addr Table Entries: 0x%02x\n", entries);
for (i=0; i<entries; i++) {
hwaddr = fru_data[offset];
sitenum = fru_data[offset + 1];
sitetype = fru_data[offset + 2];
printf(
" HWAddr: 0x%02x (0x%02x) SiteNum: %d SiteType: 0x%02x %s\n",
hwaddr, hwaddr * 2,
sitenum, sitetype,
(sitetype < 0xa) ?
picmg_site_type_strings[sitetype] :
"Reserved");
offset += 3;
}
}
break;
case FRU_PICMG_SHELF_POWER_DIST:
{
unsigned int entries;
unsigned int feeds;
unsigned int hwaddr;
unsigned int i;
unsigned int id;
unsigned int j;
unsigned int maxext;
unsigned int maxint;
unsigned int minexp;
printf(" FRU_PICMG_SHELF_POWER_DIST\n");
feeds = fru_data[offset++];
printf(" Number of Power Feeds: 0x%02x\n",
feeds);
for (i=0; i<feeds; i++) {
printf(" Feed %d:\n", i);
maxext = fru_data[offset] |
(fru_data[offset+1] << 8);
offset += 2;
maxint = fru_data[offset] |
(fru_data[offset+1] << 8);
offset += 2;
minexp = fru_data[offset];
offset += 1;
entries = fru_data[offset];
offset += 1;
printf(
" Max External Current: %d.%d Amps (0x%04x)\n",
maxext / 10, maxext % 10, maxext);
if (maxint < 0xffff) {
printf(
" Max Internal Current: %d.%d Amps (0x%04x)\n",
maxint / 10, maxint % 10,
maxint);
} else {
printf(
" Max Internal Current: Not Specified\n");
}
if (minexp >= 0x48 && minexp <= 0x90) {
printf(
" Min Expected Voltage: -%02d.%dV\n",
minexp / 2, (minexp % 2) * 5);
} else {
printf(
" Min Expected Voltage: -%dV (actual invalid value 0x%x)\n",
36, minexp);
}
for (j=0; j < entries; j++) {
hwaddr = fru_data[offset++];
id = fru_data[offset++];
printf(
" FRU HW Addr: 0x%02x (0x%02x)",
hwaddr, hwaddr * 2);
printf(
" FRU ID: 0x%02x\n",
id);
}
}
}
break;
case FRU_PICMG_SHELF_ACTIVATION:
{
unsigned int i;
unsigned int count = 0;
printf(" FRU_PICMG_SHELF_ACTIVATION\n");
printf(
" Allowance for FRU Act Readiness: 0x%02x\n",
fru_data[offset++]);
count = fru_data[offset++];
printf(
" FRU activation and Power Desc Cnt: 0x%02x\n",
count);
for (i=0; i<count; i++) {
printf(" HW Addr: 0x%02x ",
fru_data[offset++]);
printf(" FRU ID: 0x%02x ",
fru_data[offset++]);
printf(" Max FRU Power: 0x%04x ",
fru_data[offset+0] |
(fru_data[offset+1]<<8));
offset += 2;
printf(" Config: 0x%02x \n",
fru_data[offset++]);
}
}
break;
case FRU_PICMG_SHMC_IP_CONN:
printf(" FRU_PICMG_SHMC_IP_CONN\n");
break;
case FRU_PICMG_BOARD_P2P:
printf(" FRU_PICMG_BOARD_P2P\n");
guid_count = fru_data[offset++];
printf(" GUID count: %2d\n", guid_count);
for (i = 0 ; i < guid_count; i++ ) {
int j;
printf(" GUID [%2d]: 0x", i);
for (j=0; j < sizeof(struct fru_picmgext_guid);
j++) {
printf("%02x", fru_data[offset+j]);
}
printf("\n");
offset += sizeof(struct fru_picmgext_guid);
}
printf("\n");
for (; offset < off + length;
offset += sizeof(struct fru_picmgext_link_desc)) {
/* to solve little endian /big endian problem */
struct fru_picmgext_link_desc *d;
unsigned int data = (fru_data[offset+0]) |
(fru_data[offset+1] << 8) |
(fru_data[offset+2] << 16) |
(fru_data[offset+3] << 24);
d = (struct fru_picmgext_link_desc *) &data;
printf(" Link Grouping ID: 0x%02x\n",
d->grouping);
printf(" Link Type Extension: 0x%02x - ",
d->ext);
if (d->type == FRU_PICMGEXT_LINK_TYPE_BASE) {
switch (d->ext) {
case 0:
printf("10/100/1000BASE-T Link (four-pair)\n");
break;
case 1:
printf("ShMC Cross-connect (two-pair)\n");
break;
default:
printf("Unknown\n");
break;
}
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET) {
switch (d->ext) {
case 0:
printf("1000Base-BX\n");
break;
case 1:
printf("10GBase-BX4 [XAUI]\n");
break;
case 2:
printf("FC-PI\n");
break;
case 3:
printf("1000Base-KX\n");
break;
case 4:
printf("10GBase-KX4\n");
break;
default:
printf("Unknown\n");
break;
}
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET_10GBD) {
switch (d->ext) {
case 0:
printf("10GBase-KR\n");
break;
case 1:
printf("40GBase-KR4\n");
break;
default:
printf("Unknown\n");
break;
}
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_INFINIBAND) {
printf("Unknown\n");
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_FABRIC_STAR) {
printf("Unknown\n");
} else if (d->type == FRU_PICMGEXT_LINK_TYPE_PCIE) {
printf("Unknown\n");
} else {
printf("Unknown\n");
}
printf(" Link Type: 0x%02x - ",
d->type);
switch (d->type) {
case FRU_PICMGEXT_LINK_TYPE_BASE:
printf("PICMG 3.0 Base Interface 10/100/1000\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET:
printf("PICMG 3.1 Ethernet Fabric Interface\n");
printf(" Base signaling Link Class\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_INFINIBAND:
printf("PICMG 3.2 Infiniband Fabric Interface\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_STAR:
printf("PICMG 3.3 Star Fabric Interface\n");
break;
case FRU_PICMGEXT_LINK_TYPE_PCIE:
printf("PICMG 3.4 PCI Express Fabric Interface\n");
break;
case FRU_PICMGEXT_LINK_TYPE_FABRIC_ETHERNET_10GBD:
printf("PICMG 3.1 Ethernet Fabric Interface\n");
printf(" 10.3125Gbd signaling Link Class\n");
break;
default:
if (d->type == 0 || d->type == 0xff) {
printf("Reserved\n");
} else if (d->type >= 0x06 && d->type <= 0xef) {
printf("Reserved\n");
} else if (d->type >= 0xf0 && d->type <= 0xfe) {
printf("OEM GUID Definition\n");
} else {
printf("Invalid\n");
}
break;
}
printf(" Link Designator: \n");
printf(" Port Flag: 0x%02x\n",
d->desig_port);
printf(" Interface: 0x%02x - ",
d->desig_if);
switch (d->desig_if) {
case FRU_PICMGEXT_DESIGN_IF_BASE:
printf("Base Interface\n");
break;
case FRU_PICMGEXT_DESIGN_IF_FABRIC:
printf("Fabric Interface\n");
break;
case FRU_PICMGEXT_DESIGN_IF_UPDATE_CHANNEL:
printf("Update Channel\n");
break;
case FRU_PICMGEXT_DESIGN_IF_RESERVED:
printf("Reserved\n");
break;
default:
printf("Invalid");
break;
}
printf(" Channel Number: 0x%02x\n",
d->desig_channel);
printf("\n");
}
break;
case FRU_AMC_CURRENT:
{
unsigned char current;
printf(" FRU_AMC_CURRENT\n");
current = fru_data[offset];
printf(" Current draw(@12V): %.2f A [ %.2f Watt ]\n",
(float)current / 10.0f,
(float)current / 10.0f * 12.0f);
printf("\n");
}
break;
case FRU_AMC_ACTIVATION:
printf(" FRU_AMC_ACTIVATION\n");
{
uint16_t max_current;
max_current = fru_data[offset];
max_current |= fru_data[++offset]<<8;
printf(" Maximum Internal Current(@12V): %.2f A [ %.2f Watt ]\n",
(float)max_current / 10.0f,
(float)max_current / 10.0f * 12.0f);
printf(" Module Activation Readiness: %i sec.\n", fru_data[++offset]);
printf(" Descriptor Count: %i\n", fru_data[++offset]);
printf("\n");
for(++offset; offset < off + length;
offset += sizeof(struct fru_picmgext_activation_record))
{
struct fru_picmgext_activation_record *a;
a = (struct fru_picmgext_activation_record *)&fru_data[offset];
printf(" IPMB-Address: 0x%x\n",
a->ibmb_addr);
printf(" Max. Module Current: %.2f A\n",
(float)a->max_module_curr / 10.0f);
printf("\n");
}
}
break;
case FRU_AMC_CARRIER_P2P:
{
uint16_t index;
printf(" FRU_CARRIER_P2P\n");
for(; offset < off + length; ) {
struct fru_picmgext_carrier_p2p_record * h =
(struct fru_picmgext_carrier_p2p_record *)&fru_data[offset];
printf("\n");
printf(" Resource ID: %i",
(h->resource_id & 0x07));
printf(" Type: ");
if ((h->resource_id>>7) == 1) {
printf("AMC\n");
} else {
printf("Local\n");
}
printf(" Descriptor Count: %i\n",
h->p2p_count);
offset += sizeof(struct fru_picmgext_carrier_p2p_record);
for (index = 0; index < h->p2p_count; index++) {
/* to solve little endian /big endian problem */
unsigned char data[3];
struct fru_picmgext_carrier_p2p_descriptor * desc;
# ifndef WORDS_BIGENDIAN
data[0] = fru_data[offset+0];
data[1] = fru_data[offset+1];
data[2] = fru_data[offset+2];
# else
data[0] = fru_data[offset+2];
data[1] = fru_data[offset+1];
data[2] = fru_data[offset+0];
# endif
desc = (struct fru_picmgext_carrier_p2p_descriptor*)&data;
printf(" Port: %02d\t-> Remote Port: %02d\t",
desc->local_port, desc->remote_port);
if ((desc->remote_resource_id >> 7) == 1) {
printf("[ AMC ID: %02d ]\n",
desc->remote_resource_id & 0x0F);
} else {
printf("[ local ID: %02d ]\n",
desc->remote_resource_id & 0x0F);
}
offset += sizeof(struct fru_picmgext_carrier_p2p_descriptor);
}
}
}
break;
case FRU_AMC_P2P:
{
unsigned int index;
unsigned char channel_count;
struct fru_picmgext_amc_p2p_record * h;
printf(" FRU_AMC_P2P\n");
guid_count = fru_data[offset];
printf(" GUID count: %2d\n", guid_count);
for (i = 0 ; i < guid_count; i++) {
int j;
printf(" GUID %2d: ", i);
for (j=0; j < sizeof(struct fru_picmgext_guid);
j++) {
printf("%02x", fru_data[offset+j]);
offset += sizeof(struct fru_picmgext_guid);
printf("\n");
}
h = (struct fru_picmgext_amc_p2p_record *)&fru_data[++offset];
printf(" %s",
(h->record_type ?
"AMC Module:" : "On-Carrier Device"));
printf(" Resource ID: %i\n", h->resource_id);
offset += sizeof(struct fru_picmgext_amc_p2p_record);
channel_count = fru_data[offset++];
printf(" Descriptor Count: %i\n",
channel_count);
for (index = 0; index < channel_count; index++) {
unsigned int data;
struct fru_picmgext_amc_channel_desc_record *d;
/* pack the data in little endian format.
* Stupid intel...
*/
data = fru_data[offset] |
(fru_data[offset + 1] << 8) |
(fru_data[offset + 2] << 16);
d = (struct fru_picmgext_amc_channel_desc_record *)&data;
printf(" Lane 0 Port: %i\n",
d->lane0port);
printf(" Lane 1 Port: %i\n",
d->lane1port);
printf(" Lane 2 Port: %i\n",
d->lane2port);
printf(" Lane 3 Port: %i\n\n",
d->lane3port);
offset += FRU_PICMGEXT_AMC_CHANNEL_DESC_RECORD_SIZE;
}
for (; offset < off + length;) {
unsigned int data[2];
struct fru_picmgext_amc_link_desc_record *l;
l = (struct fru_picmgext_amc_link_desc_record *)&data[0];
data[0] = fru_data[offset] |
(fru_data[offset + 1] << 8) |
(fru_data[offset + 2] << 16) |
(fru_data[offset + 3] << 24);
data[1] = fru_data[offset + 4];
printf( " Link Designator: Channel ID: %i\n"
" Port Flag 0: %s%s%s%s\n",
l->channel_id,
(l->port_flag_0)?"o":"-",
(l->port_flag_1)?"o":"-",
(l->port_flag_2)?"o":"-",
(l->port_flag_3)?"o":"-" );
switch (l->type) {
case FRU_PICMGEXT_AMC_LINK_TYPE_PCIE:
/* AMC.1 */
printf( " Link Type: %02x - "
"AMC.1 PCI Express\n", l->type);
switch (l->type_ext) {
case AMC_LINK_TYPE_EXT_PCIE_G1_NSSC:
printf( " Link Type Ext: %i - "
" Gen 1 capable - non SSC\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_PCIE_G1_SSC:
printf( " Link Type Ext: %i - "
" Gen 1 capable - SSC\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_PCIE_G2_NSSC:
printf( " Link Type Ext: %i - "
" Gen 2 capable - non SSC\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_PCIE_G2_SSC:
printf( " Link Type Ext: %i - "
" Gen 2 capable - SSC\n",
l->type_ext);
break;
default:
printf( " Link Type Ext: %i - "
" Invalid\n",
l->type_ext);
break;
}
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_PCIE_AS1:
case FRU_PICMGEXT_AMC_LINK_TYPE_PCIE_AS2:
/* AMC.1 */
printf( " Link Type: %02x - "
"AMC.1 PCI Express Advanced Switching\n",
l->type);
printf(" Link Type Ext: %i\n",
l->type_ext);
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_ETHERNET:
/* AMC.2 */
printf( " Link Type: %02x - "
"AMC.2 Ethernet\n",
l->type);
switch (l->type_ext) {
case AMC_LINK_TYPE_EXT_ETH_1000_BX:
printf( " Link Type Ext: %i - "
" 1000Base-Bx (SerDES Gigabit) Ethernet Link\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_ETH_10G_XAUI:
printf( " Link Type Ext: %i - "
" 10Gbit XAUI Ethernet Link\n",
l->type_ext);
break;
default:
printf( " Link Type Ext: %i - "
" Invalid\n",
l->type_ext);
break;
}
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_STORAGE:
/* AMC.3 */
printf( " Link Type: %02x - "
"AMC.3 Storage\n",
l->type);
switch (l->type_ext) {
case AMC_LINK_TYPE_EXT_STORAGE_FC:
printf( " Link Type Ext: %i - "
" Fibre Channel\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_STORAGE_SATA:
printf( " Link Type Ext: %i - "
" Serial ATA\n",
l->type_ext);
break;
case AMC_LINK_TYPE_EXT_STORAGE_SAS:
printf( " Link Type Ext: %i - "
" Serial Attached SCSI\n",
l->type_ext);
break;
default:
printf( " Link Type Ext: %i - "
" Invalid\n",
l->type_ext);
break;
}
break;
case FRU_PICMGEXT_AMC_LINK_TYPE_RAPIDIO:
/* AMC.4 */
printf( " Link Type: %02x - "
"AMC.4 Serial Rapid IO\n",
l->type);
printf(" Link Type Ext: %i\n",
l->type_ext);
break;
default:
printf( " Link Type: %02x - "
"reserved or OEM GUID",
l->type);
printf(" Link Type Ext: %i\n",
l->type_ext);
break;
}
printf(" Link group Id: %i\n",
l->group_id);
printf(" Link Asym Match: %i\n\n",
l->asym_match);
offset += FRU_PICMGEXT_AMC_LINK_DESC_RECORD_SIZE;
}
}
}
break;
case FRU_AMC_CARRIER_INFO:
{
unsigned char extVersion;
unsigned char siteCount;
printf(" FRU_CARRIER_INFO\n");
extVersion = fru_data[offset++];
siteCount = fru_data[offset++];
printf(" AMC.0 extension version: R%d.%d\n",
(extVersion >> 0)& 0x0F,
(extVersion >> 4)& 0x0F );
printf(" Carrier Sie Number Cnt: %d\n", siteCount);
for (i = 0 ; i < siteCount; i++ ){
printf(" Site ID: %i \n", fru_data[offset++]);
}
printf("\n");
}
break;
case FRU_PICMG_CLK_CARRIER_P2P:
{
unsigned char desc_count;
int i,j;
printf(" FRU_PICMG_CLK_CARRIER_P2P\n");
desc_count = fru_data[offset++];
for(i=0; i<desc_count; i++){
unsigned char resource_id;
unsigned char channel_count;
resource_id = fru_data[offset++];
channel_count = fru_data[offset++];
printf("\n");
printf(" Clock Resource ID: 0x%02x Type: ", resource_id);
if((resource_id & 0xC0)>>6 == 0) {printf("On-Carrier-Device\n");}
else if((resource_id & 0xC0)>>6 == 1) {printf("AMC slot\n");}
else if((resource_id & 0xC0)>>6 == 2) {printf("Backplane\n");}
else{ printf("reserved\n");}
printf(" Channel Count: 0x%02x\n", channel_count);
for(j=0; j<channel_count; j++){
unsigned char loc_channel, rem_channel, rem_resource;
loc_channel = fru_data[offset++];
rem_channel = fru_data[offset++];
rem_resource = fru_data[offset++];
printf(" CLK-ID: 0x%02x ->", loc_channel);
printf(" remote CLKID: 0x%02x ", rem_channel);
if((rem_resource & 0xC0)>>6 == 0) {printf("[ Carrier-Dev");}
else if((rem_resource & 0xC0)>>6 == 1) {printf("[ AMC slot ");}
else if((rem_resource & 0xC0)>>6 == 2) {printf("[ Backplane ");}
else{ printf("reserved ");}
printf(" 0x%02x ]\n", rem_resource&0xF);
}
}
printf("\n");
}
break;
case FRU_PICMG_CLK_CONFIG:
{
unsigned char resource_id, descr_count;
int i,j;
printf(" FRU_PICMG_CLK_CONFIG\n");
resource_id = fru_data[offset++];
descr_count = fru_data[offset++];
printf("\n");
printf(" Clock Resource ID: 0x%02x\n", resource_id);
printf(" Descr. Count: 0x%02x\n", descr_count);
for(i=0; i<descr_count; i++){
unsigned char channel_id, control;
unsigned char indirect_cnt, direct_cnt;
channel_id = fru_data[offset++];
control = fru_data[offset++];
printf(" CLK-ID: 0x%02x - ", channel_id);
printf("CTRL 0x%02x [ %12s ]\n",
control,
((control&0x1)==0)?"Carrier IPMC":"Application");
indirect_cnt = fru_data[offset++];
direct_cnt = fru_data[offset++];
printf(" Cnt: Indirect 0x%02x / Direct 0x%02x\n",
indirect_cnt,
direct_cnt);
/* indirect desc */
for(j=0; j<indirect_cnt; j++){
unsigned char feature;
unsigned char dep_chn_id;
feature = fru_data[offset++];
dep_chn_id = fru_data[offset++];
printf(" Feature: 0x%02x [%8s] - ", feature, (feature&0x1)==1?"Source":"Receiver");
printf(" Dep. CLK-ID: 0x%02x\n", dep_chn_id);
}
/* direct desc */
for(j=0; j<direct_cnt; j++){
unsigned char feature, family, accuracy;
unsigned int freq, min_freq, max_freq;
feature = fru_data[offset++];
family = fru_data[offset++];
accuracy = fru_data[offset++];
freq = (fru_data[offset+0] << 0 ) | (fru_data[offset+1] << 8 )
| (fru_data[offset+2] << 16) | (fru_data[offset+3] << 24);
offset += 4;
min_freq = (fru_data[offset+0] << 0 ) | (fru_data[offset+1] << 8 )
| (fru_data[offset+2] << 16) | (fru_data[offset+3] << 24);
offset += 4;
max_freq = (fru_data[offset+0] << 0 ) | (fru_data[offset+1] << 8 )
| (fru_data[offset+2] << 16) | (fru_data[offset+3] << 24);
offset += 4;
printf(" - Feature: 0x%02x - PLL: %x / Asym: %s\n",
feature,
(feature > 1) & 1,
(feature&1)?"Source":"Receiver");
printf(" Family: 0x%02x - AccLVL: 0x%02x\n", family, accuracy);
printf(" FRQ: %-9ld - min: %-9ld - max: %-9ld\n",
freq, min_freq, max_freq);
}
printf("\n");
}
printf("\n");
}
break;
case FRU_UTCA_FRU_INFO_TABLE:
case FRU_UTCA_CARRIER_MNG_IP:
case FRU_UTCA_CARRIER_INFO:
case FRU_UTCA_CARRIER_LOCATION:
case FRU_UTCA_SHMC_IP_LINK:
case FRU_UTCA_POWER_POLICY:
case FRU_UTCA_ACTIVATION:
case FRU_UTCA_PM_CAPABILTY:
case FRU_UTCA_FAN_GEOGRAPHY:
case FRU_UTCA_CLOCK_MAPPING:
case FRU_UTCA_MSG_BRIDGE_POLICY:
case FRU_UTCA_OEM_MODULE_DESC:
printf(" Not implemented yet. uTCA specific record found!!\n");
printf(" - Record ID: 0x%02x\n", h->record_id);
break;
default:
printf(" Unknown OEM Extension Record ID: %x\n", h->record_id);
break;
}
}
/* __ipmi_fru_print - Do actual work to print a FRU by its ID
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
__ipmi_fru_print(struct ipmi_intf * intf, uint8_t id)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
memset(&fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru.size, fru.access ? "words" : "bytes");
if (fru.size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru.size);
return -1;
}
/*
* retrieve the FRU header
*/
msg_data[0] = id;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return 1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return 1;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 1) {
lprintf(LOG_ERR, " Unknown FRU header version 0x%02x",
header.version);
return -1;
}
/* offsets need converted to bytes
* but that conversion is not done to the structure
* because we may end up with offset > 255
* which would overflow our 1-byte offset field */
lprintf(LOG_DEBUG, "fru.header.version: 0x%x",
header.version);
lprintf(LOG_DEBUG, "fru.header.offset.internal: 0x%x",
header.offset.internal * 8);
lprintf(LOG_DEBUG, "fru.header.offset.chassis: 0x%x",
header.offset.chassis * 8);
lprintf(LOG_DEBUG, "fru.header.offset.board: 0x%x",
header.offset.board * 8);
lprintf(LOG_DEBUG, "fru.header.offset.product: 0x%x",
header.offset.product * 8);
lprintf(LOG_DEBUG, "fru.header.offset.multi: 0x%x",
header.offset.multi * 8);
/*
* rather than reading the entire part
* only read the areas we'll format
*/
/* chassis area */
if ((header.offset.chassis*8) >= sizeof(struct fru_header))
fru_area_print_chassis(intf, &fru, id, header.offset.chassis*8);
/* board area */
if ((header.offset.board*8) >= sizeof(struct fru_header))
fru_area_print_board(intf, &fru, id, header.offset.board*8);
/* product area */
if ((header.offset.product*8) >= sizeof(struct fru_header))
fru_area_print_product(intf, &fru, id, header.offset.product*8);
/* multirecord area */
if( verbose==0 ) /* scipp parsing multirecord */
return 0;
if ((header.offset.multi*8) >= sizeof(struct fru_header))
fru_area_print_multirec(intf, &fru, id, header.offset.multi*8);
return 0;
}
/* ipmi_fru_print - Print a FRU from its SDR locator record
*
* @intf: ipmi interface
* @fru: SDR FRU Locator Record
*
* returns -1 on error
*/
int
ipmi_fru_print(struct ipmi_intf * intf, struct sdr_record_fru_locator * fru)
{
char desc[17];
uint8_t bridged_request = 0;
uint32_t save_addr;
uint32_t save_channel;
int rc = 0;
if (!fru)
return __ipmi_fru_print(intf, 0);
/* Logical FRU Device
* dev_type == 0x10
* modifier
* 0x00 = IPMI FRU Inventory
* 0x01 = DIMM Memory ID
* 0x02 = IPMI FRU Inventory
* 0x03 = System Processor FRU
* 0xff = unspecified
*
* EEPROM 24C01 or equivalent
* dev_type >= 0x08 && dev_type <= 0x0f
* modifier
* 0x00 = unspecified
* 0x01 = DIMM Memory ID
* 0x02 = IPMI FRU Inventory
* 0x03 = System Processor Cartridge
*/
if (fru->dev_type != 0x10 &&
(fru->dev_type_modifier != 0x02 ||
fru->dev_type < 0x08 || fru->dev_type > 0x0f))
return -1;
if (fru->dev_slave_addr == IPMI_BMC_SLAVE_ADDR &&
fru->device_id == 0)
return 0;
memset(desc, 0, sizeof(desc));
memcpy(desc, fru->id_string, fru->id_code & 0x01f);
desc[fru->id_code & 0x01f] = 0;
printf("FRU Device Description : %s (ID %d)\n", desc, fru->device_id);
switch (fru->dev_type_modifier) {
case 0x00:
case 0x02:
if (BRIDGE_TO_SENSOR(intf, fru->dev_slave_addr,
fru->channel_num)) {
bridged_request = 1;
save_addr = intf->target_addr;
intf->target_addr = fru->dev_slave_addr;
save_channel = intf->target_channel;
intf->target_channel = fru->channel_num;
}
/* print FRU */
rc = __ipmi_fru_print(intf, fru->device_id);
if (bridged_request) {
intf->target_addr = save_addr;
intf->target_channel = save_channel;
}
break;
case 0x01:
rc = ipmi_spd_print_fru(intf, fru->device_id);
break;
default:
if (verbose)
printf(" Unsupported device 0x%02x "
"type 0x%02x with modifier 0x%02x\n",
fru->device_id, fru->dev_type,
fru->dev_type_modifier);
else
printf(" Unsupported device\n");
}
printf("\n");
return rc;
}
/* ipmi_fru_print_all - Print builtin FRU + SDR FRU Locator records
*
* @intf: ipmi interface
*
* returns -1 on error
*/
static int
ipmi_fru_print_all(struct ipmi_intf * intf)
{
struct ipmi_sdr_iterator * itr;
struct sdr_get_rs * header;
struct sdr_record_fru_locator * fru;
int rc;
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct ipm_devid_rsp *devid;
struct sdr_record_mc_locator * mc;
uint32_t save_addr;
printf("FRU Device Description : Builtin FRU Device (ID 0)\n");
/* TODO: Figure out if FRU device 0 may show up in SDR records. */
/* Do a Get Device ID command to determine device support */
memset (&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_APP;
req.msg.cmd = BMC_GET_DEVICE_ID;
req.msg.data_len = 0;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
lprintf(LOG_ERR, "Get Device ID command failed");
return -1;
}
if (rsp->ccode) {
lprintf(LOG_ERR, "Get Device ID command failed: %s",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
devid = (struct ipm_devid_rsp *) rsp->data;
/* Check the FRU inventory device bit to decide whether various */
/* FRU commands can be issued to FRU device #0 LUN 0 */
if (devid->adtl_device_support & 0x08) { /* FRU Inventory Device bit? */
rc = ipmi_fru_print(intf, NULL);
printf("\n");
}
itr = ipmi_sdr_start(intf, 0);
if (!itr)
return -1;
/* Walk the SDRs looking for FRU Devices and Management Controller Devices. */
/* For FRU devices, print the FRU from the SDR locator record. */
/* For MC devices, issue FRU commands to the satellite controller to print */
/* FRU data. */
while ((header = ipmi_sdr_get_next_header(intf, itr)))
{
if (header->type == SDR_RECORD_TYPE_MC_DEVICE_LOCATOR ) {
/* Check the capabilities of the Management Controller Device */
mc = (struct sdr_record_mc_locator *)
ipmi_sdr_get_record(intf, header, itr);
/* Does this MC device support FRU inventory device? */
if (mc && (mc->dev_support & 0x08) && /* FRU inventory device? */
intf->target_addr != mc->dev_slave_addr) {
/* Yes. Prepare to issue FRU commands to FRU device #0 LUN 0 */
/* using the slave address specified in the MC record. */
/* save current target address */
save_addr = intf->target_addr;
/* set new target address to satellite controller */
intf->target_addr = mc->dev_slave_addr;
printf("FRU Device Description : %-16s\n", mc->id_string);
/* print the FRU by issuing FRU commands to the satellite */
/* controller. */
rc = __ipmi_fru_print(intf, 0);
printf("\n");
/* restore previous target */
intf->target_addr = save_addr;
}
free_n(&mc);
continue;
}
if (header->type != SDR_RECORD_TYPE_FRU_DEVICE_LOCATOR)
continue;
/* Print the FRU from the SDR locator record. */
fru = (struct sdr_record_fru_locator *)
ipmi_sdr_get_record(intf, header, itr);
if (!fru || !fru->logical) {
free_n(&fru);
continue;
}
rc = ipmi_fru_print(intf, fru);
free_n(&fru);
}
ipmi_sdr_end(itr);
return rc;
}
/* ipmi_fru_read_help() - print help text for 'read'
*
* returns void
*/
void
ipmi_fru_read_help()
{
lprintf(LOG_NOTICE, "fru read <fru id> <fru file>");
lprintf(LOG_NOTICE, "Note: FRU ID and file(incl. full path) must be specified.");
lprintf(LOG_NOTICE, "Example: ipmitool fru read 0 /root/fru.bin");
} /* ipmi_fru_read_help() */
static void
ipmi_fru_read_to_bin(struct ipmi_intf * intf,
char * pFileName,
uint8_t fruId)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
uint8_t msg_data[4];
uint8_t * pFruBuf;
msg_data[0] = fruId;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
return;
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf (" Timeout accessing FRU info. (Device not present?)\n");
return;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
if (verbose) {
printf("Fru Size = %d bytes\n",fru.size);
printf("Fru Access = %xh\n", fru.access);
}
pFruBuf = malloc(fru.size);
if (pFruBuf) {
printf("Fru Size : %d bytes\n",fru.size);
read_fru_area(intf, &fru, fruId, 0, fru.size, pFruBuf);
} else {
lprintf(LOG_ERR, "Cannot allocate %d bytes\n", fru.size);
return;
}
if(pFruBuf)
{
FILE * pFile;
pFile = fopen(pFileName,"wb");
if (pFile) {
fwrite(pFruBuf, fru.size, 1, pFile);
printf("Done\n");
} else {
lprintf(LOG_ERR, "Error opening file %s\n", pFileName);
free_n(&pFruBuf);
return;
}
fclose(pFile);
}
free_n(&pFruBuf);
}
static void
ipmi_fru_write_from_bin(struct ipmi_intf * intf,
char * pFileName,
uint8_t fruId)
{
struct ipmi_rs *rsp;
struct ipmi_rq req;
struct fru_info fru;
uint8_t msg_data[4];
uint8_t *pFruBuf;
uint16_t len = 0;
FILE *pFile;
msg_data[0] = fruId;
memset(&req, 0, sizeof (req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
return;
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf(" Timeout accessing FRU info. (Device not present?)\n");
return;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
if (verbose) {
printf("Fru Size = %d bytes\n", fru.size);
printf("Fru Access = %xh\n", fru.access);
}
pFruBuf = malloc(fru.size);
if (!pFruBuf) {
lprintf(LOG_ERR, "Cannot allocate %d bytes\n", fru.size);
return;
}
pFile = fopen(pFileName, "rb");
if (pFile) {
len = fread(pFruBuf, 1, fru.size, pFile);
printf("Fru Size : %d bytes\n", fru.size);
printf("Size to Write : %d bytes\n", len);
fclose(pFile);
} else {
lprintf(LOG_ERR, "Error opening file %s\n", pFileName);
}
if (len != 0) {
write_fru_area(intf, &fru, fruId,0, 0, len, pFruBuf);
lprintf(LOG_INFO,"Done");
}
free_n(&pFruBuf);
}
/* ipmi_fru_write_help() - print help text for 'write'
*
* returns void
*/
void
ipmi_fru_write_help()
{
lprintf(LOG_NOTICE, "fru write <fru id> <fru file>");
lprintf(LOG_NOTICE, "Note: FRU ID and file(incl. full path) must be specified.");
lprintf(LOG_NOTICE, "Example: ipmitool fru write 0 /root/fru.bin");
} /* ipmi_fru_write_help() */
/* ipmi_fru_edit_help - print help text for 'fru edit' command
*
* returns void
*/
void
ipmi_fru_edit_help()
{
lprintf(LOG_NOTICE,
"fru edit <fruid> field <section> <index> <string> - edit FRU string");
lprintf(LOG_NOTICE,
"fru edit <fruid> oem iana <record> <format> <args> - limited OEM support");
} /* ipmi_fru_edit_help() */
/* ipmi_fru_edit_multirec - Query new values to replace original FRU content
*
* @intf: interface to use
* @id: FRU id to work on
*
* returns: nothing
*/
/* Work in progress, copy paste most of the stuff for other functions in this
file ... not elegant yet */
static int
ipmi_fru_edit_multirec(struct ipmi_intf * intf, uint8_t id ,
int argc, char ** argv)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
uint16_t retStatus = 0;
uint32_t offFruMultiRec;
uint32_t fruMultiRecSize = 0;
struct fru_info fruInfo;
retStatus = ipmi_fru_get_multirec_location_from_fru(intf, id, &fruInfo,
&offFruMultiRec,
&fruMultiRecSize);
if (retStatus != 0) {
return retStatus;
}
lprintf(LOG_DEBUG, "FRU Size : %lu\n", fruMultiRecSize);
lprintf(LOG_DEBUG, "Multi Rec offset: %lu\n", offFruMultiRec);
{
memset(&fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru.size, fru.access ? "words" : "bytes");
if (fru.size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru.size);
return -1;
}
}
{
uint8_t * fru_data;
uint32_t i;
uint32_t offset= offFruMultiRec;
struct fru_multirec_header * h;
uint32_t last_off, len;
uint8_t error=0;
i = last_off = offset;
memset(&fru, 0, sizeof(fru));
fru_data = malloc(fru.size + 1);
if (!fru_data) {
lprintf(LOG_ERR, " Out of memory!");
return -1;
}
memset(fru_data, 0, fru.size + 1);
do {
h = (struct fru_multirec_header *) (fru_data + i);
/* read area in (at most) FRU_MULTIREC_CHUNK_SIZE bytes at a time */
if ((last_off < (i + sizeof(*h))) || (last_off < (i + h->len)))
{
len = fru.size - last_off;
if (len > FRU_MULTIREC_CHUNK_SIZE)
len = FRU_MULTIREC_CHUNK_SIZE;
if (read_fru_area(intf, &fru, id, last_off, len, fru_data) < 0)
break;
last_off += len;
}
if( h->type == FRU_RECORD_TYPE_OEM_EXTENSION ){
struct fru_multirec_oem_header *oh=(struct fru_multirec_oem_header *)
&fru_data[i + sizeof(struct fru_multirec_header)];
uint32_t iana = oh->mfg_id[0] | oh->mfg_id[1]<<8 | oh->mfg_id[2]<<16;
uint32_t suppliedIana = 0 ;
/* Now makes sure this is really PICMG record */
/* Default to PICMG for backward compatibility */
if( argc <=2 ) {
suppliedIana = IPMI_OEM_PICMG;
} else {
if( !strncmp( argv[2] , "oem" , 3 )) {
/* Expect IANA number next */
if( argc <= 3 ) {
lprintf(LOG_ERR, "oem iana <record> <format> [<args>]");
error = 1;
} else {
if (str2uint(argv[3], &suppliedIana) == 0) {
lprintf(LOG_DEBUG,
"using iana: %d",
suppliedIana);
} else {
lprintf(LOG_ERR,
"Given IANA '%s' is invalid.",
argv[3]);
error = 1;
}
}
}
}
if( suppliedIana == iana ) {
lprintf(LOG_DEBUG, "Matching record found" );
if( iana == IPMI_OEM_PICMG ){
if( ipmi_fru_picmg_ext_edit(fru_data,
i + sizeof(struct fru_multirec_header),
h->len, h, oh )){
/* The fru changed */
write_fru_area(intf,&fru,id, i,i,
h->len+ sizeof(struct fru_multirec_header), fru_data);
}
}
else if( iana == IPMI_OEM_KONTRON ) {
if( ipmi_fru_oemkontron_edit( argc,argv,fru_data,
i + sizeof(struct fru_multirec_header),
h->len, h, oh )){
/* The fru changed */
write_fru_area(intf,&fru,id, i,i,
h->len+ sizeof(struct fru_multirec_header), fru_data);
}
}
/* FIXME: Add OEM record support here */
else{
printf(" OEM IANA (%s) Record not support in this mode\n",
val2str( iana, ipmi_oem_info));
error = 1;
}
}
}
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80) && (error != 1));
free_n(&fru_data);
}
return 0;
}
/* ipmi_fru_get_help - print help text for 'fru get'
*
* returns void
*/
void
ipmi_fru_get_help()
{
lprintf(LOG_NOTICE,
"fru get <fruid> oem iana <record> <format> <args> - limited OEM support");
} /* ipmi_fru_get_help() */
void
ipmi_fru_internaluse_help()
{
lprintf(LOG_NOTICE,
"fru internaluse <fru id> info - get internal use area size");
lprintf(LOG_NOTICE,
"fru internaluse <fru id> print - print internal use area in hex");
lprintf(LOG_NOTICE,
"fru internaluse <fru id> read <fru file> - read internal use area to file");
lprintf(LOG_NOTICE,
"fru internaluse <fru id> write <fru file> - write internal use area from file");
} /* void ipmi_fru_internaluse_help() */
/* ipmi_fru_get_multirec - Query new values to replace original FRU content
*
* @intf: interface to use
* @id: FRU id to work on
*
* returns: nothing
*/
/* Work in progress, copy paste most of the stuff for other functions in this
file ... not elegant yet */
static int
ipmi_fru_get_multirec(struct ipmi_intf * intf, uint8_t id ,
int argc, char ** argv)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
uint16_t retStatus = 0;
uint32_t offFruMultiRec;
uint32_t fruMultiRecSize = 0;
struct fru_info fruInfo;
retStatus = ipmi_fru_get_multirec_location_from_fru(intf, id, &fruInfo,
&offFruMultiRec,
&fruMultiRecSize);
if (retStatus != 0) {
return retStatus;
}
lprintf(LOG_DEBUG, "FRU Size : %lu\n", fruMultiRecSize);
lprintf(LOG_DEBUG, "Multi Rec offset: %lu\n", offFruMultiRec);
{
memset(&fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru.size, fru.access ? "words" : "bytes");
if (fru.size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru.size);
return -1;
}
}
{
uint8_t * fru_data;
uint32_t i;
uint32_t offset= offFruMultiRec;
struct fru_multirec_header * h;
uint32_t last_off, len;
uint8_t error=0;
i = last_off = offset;
fru_data = malloc(fru.size + 1);
if (!fru_data) {
lprintf(LOG_ERR, " Out of memory!");
return -1;
}
memset(fru_data, 0, fru.size + 1);
do {
h = (struct fru_multirec_header *) (fru_data + i);
/* read area in (at most) FRU_MULTIREC_CHUNK_SIZE bytes at a time */
if ((last_off < (i + sizeof(*h))) || (last_off < (i + h->len)))
{
len = fru.size - last_off;
if (len > FRU_MULTIREC_CHUNK_SIZE)
len = FRU_MULTIREC_CHUNK_SIZE;
if (read_fru_area(intf, &fru, id, last_off, len, fru_data) < 0)
break;
last_off += len;
}
if( h->type == FRU_RECORD_TYPE_OEM_EXTENSION ){
struct fru_multirec_oem_header *oh=(struct fru_multirec_oem_header *)
&fru_data[i + sizeof(struct fru_multirec_header)];
uint32_t iana = oh->mfg_id[0] | oh->mfg_id[1]<<8 | oh->mfg_id[2]<<16;
uint32_t suppliedIana = 0 ;
/* Now makes sure this is really PICMG record */
if( !strncmp( argv[2] , "oem" , 3 )) {
/* Expect IANA number next */
if( argc <= 3 ) {
lprintf(LOG_ERR, "oem iana <record> <format>");
error = 1;
} else {
if (str2uint(argv[3], &suppliedIana) == 0) {
lprintf(LOG_DEBUG,
"using iana: %d",
suppliedIana);
} else {
lprintf(LOG_ERR,
"Given IANA '%s' is invalid.",
argv[3]);
error = 1;
}
}
}
if( suppliedIana == iana ) {
lprintf(LOG_DEBUG, "Matching record found" );
if( iana == IPMI_OEM_KONTRON ) {
ipmi_fru_oemkontron_get(argc, argv, fru_data,
i + sizeof(struct fru_multirec_header),
oh);
}
/* FIXME: Add OEM record support here */
else{
printf(" OEM IANA (%s) Record not supported in this mode\n",
val2str( iana, ipmi_oem_info));
error = 1;
}
}
}
i += h->len + sizeof (struct fru_multirec_header);
} while (!(h->format & 0x80) && (error != 1));
free_n(&fru_data);
}
return 0;
}
#define ERR_EXIT do { rc = -1; goto exit; } while(0)
static
int
ipmi_fru_upg_ekeying(struct ipmi_intf *intf, char *pFileName, uint8_t fruId)
{
struct fru_info fruInfo = {0};
uint8_t *buf = NULL;
uint32_t offFruMultiRec = 0;
uint32_t fruMultiRecSize = 0;
uint32_t offFileMultiRec = 0;
uint32_t fileMultiRecSize = 0;
int rc = 0;
if (!pFileName) {
lprintf(LOG_ERR, "File expected, but none given.");
ERR_EXIT;
}
if (ipmi_fru_get_multirec_location_from_fru(intf, fruId, &fruInfo,
&offFruMultiRec, &fruMultiRecSize) != 0) {
lprintf(LOG_ERR, "Failed to get multirec location from FRU.");
ERR_EXIT;
}
lprintf(LOG_DEBUG, "FRU Size : %lu\n", fruMultiRecSize);
lprintf(LOG_DEBUG, "Multi Rec offset: %lu\n", offFruMultiRec);
if (ipmi_fru_get_multirec_size_from_file(pFileName, &fileMultiRecSize,
&offFileMultiRec) != 0) {
lprintf(LOG_ERR, "Failed to get multirec size from file '%s'.", pFileName);
ERR_EXIT;
}
buf = malloc(fileMultiRecSize);
if (!buf) {
lprintf(LOG_ERR, "ipmitool: malloc failure");
ERR_EXIT;
}
if (ipmi_fru_get_multirec_from_file(pFileName, buf, fileMultiRecSize,
offFileMultiRec) != 0) {
lprintf(LOG_ERR, "Failed to get multirec from file '%s'.", pFileName);
ERR_EXIT;
}
if (ipmi_fru_get_adjust_size_from_buffer(buf, &fileMultiRecSize) != 0) {
lprintf(LOG_ERR, "Failed to adjust size from buffer.");
ERR_EXIT;
}
if (write_fru_area(intf, &fruInfo, fruId, 0, offFruMultiRec,
fileMultiRecSize, buf) != 0) {
lprintf(LOG_ERR, "Failed to write FRU area.");
ERR_EXIT;
}
lprintf(LOG_INFO, "Done upgrading Ekey.");
exit:
free_n(&buf);
return rc;
}
/* ipmi_fru_upgekey_help - print help text for 'upgEkey'
*
* returns void
*/
void
ipmi_fru_upgekey_help()
{
lprintf(LOG_NOTICE, "fru upgEkey <fru id> <fru file>");
lprintf(LOG_NOTICE, "Note: FRU ID and file(incl. full path) must be specified.");
lprintf(LOG_NOTICE, "Example: ipmitool fru upgEkey 0 /root/fru.bin");
} /* ipmi_fru_upgekey_help() */
static int
ipmi_fru_get_multirec_size_from_file(char * pFileName,
uint32_t * pSize,
uint32_t * pOffset)
{
struct fru_header header;
FILE * pFile;
uint8_t len = 0;
uint32_t end = 0;
*pSize = 0;
pFile = fopen(pFileName,"rb");
if (pFile) {
rewind(pFile);
len = fread(&header, 1, 8, pFile);
fseek(pFile, 0, SEEK_END);
end = ftell(pFile);
fclose(pFile);
}
lprintf(LOG_DEBUG, "File Size = %lu\n", end);
lprintf(LOG_DEBUG, "Len = %u\n", len);
if (len != 8) {
printf("Error with file %s in getting size\n", pFileName);
return -1;
}
if (header.version != 0x01) {
printf ("Unknown FRU header version %02x.\n", header.version);
return -1;
}
/* Retrieve length */
if (((header.offset.internal * 8) > (header.offset.internal * 8)) &&
((header.offset.internal * 8) < end))
end = (header.offset.internal * 8);
if (((header.offset.chassis * 8) > (header.offset.chassis * 8)) &&
((header.offset.chassis * 8) < end))
end = (header.offset.chassis * 8);
if (((header.offset.board * 8) > (header.offset.board * 8)) &&
((header.offset.board * 8) < end))
end = (header.offset.board * 8);
if (((header.offset.product * 8) > (header.offset.product * 8)) &&
((header.offset.product * 8) < end))
end = (header.offset.product * 8);
*pSize = end - (header.offset.multi * 8);
*pOffset = (header.offset.multi * 8);
return 0;
}
int
ipmi_fru_get_adjust_size_from_buffer(uint8_t * fru_data, uint32_t *pSize)
{
struct fru_multirec_header * head;
int status = 0;
uint8_t checksum = 0;
uint8_t counter = 0;
uint16_t count = 0;
do {
checksum = 0;
head = (struct fru_multirec_header *) (fru_data + count);
if (verbose) {
printf("Adding (");
}
for (counter = 0; counter < sizeof(struct fru_multirec_header); counter++) {
if (verbose) {
printf(" %02X", *(fru_data + count + counter));
}
checksum += *(fru_data + count + counter);
}
if (verbose) {
printf(")");
}
if (checksum != 0) {
lprintf(LOG_ERR, "Bad checksum in Multi Records");
status = -1;
if (verbose) {
printf("--> FAIL");
}
} else if (verbose) {
printf("--> OK");
}
if (verbose > 1 && checksum == 0) {
for (counter = 0; counter < head->len; counter++) {
printf(" %02X", *(fru_data + count + counter
+ sizeof(struct fru_multirec_header)));
}
}
if (verbose) {
printf("\n");
}
count += head->len + sizeof (struct fru_multirec_header);
} while ((!(head->format & 0x80)) && (status == 0));
*pSize = count;
lprintf(LOG_DEBUG, "Size of multirec: %lu\n", *pSize);
return status;
}
static int
ipmi_fru_get_multirec_from_file(char * pFileName, uint8_t * pBufArea,
uint32_t size, uint32_t offset)
{
FILE * pFile;
uint32_t len = 0;
if (!pFileName) {
lprintf(LOG_ERR, "Invalid file name given.");
return -1;
}
errno = 0;
pFile = fopen(pFileName, "rb");
if (!pFile) {
lprintf(LOG_ERR, "Error opening file '%s': %i -> %s.", pFileName, errno,
strerror(errno));
return -1;
}
errno = 0;
if (fseek(pFile, offset, SEEK_SET) != 0) {
lprintf(LOG_ERR, "Failed to seek in file '%s': %i -> %s.", pFileName, errno,
strerror(errno));
fclose(pFile);
return -1;
}
len = fread(pBufArea, size, 1, pFile);
fclose(pFile);
if (len != 1) {
lprintf(LOG_ERR, "Error in file '%s'.", pFileName);
return -1;
}
return 0;
}
static int
ipmi_fru_get_multirec_location_from_fru(struct ipmi_intf * intf,
uint8_t fruId,
struct fru_info *pFruInfo,
uint32_t * pRetLocation,
uint32_t * pRetSize)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
uint8_t msg_data[4];
uint32_t end;
struct fru_header header;
*pRetLocation = 0;
msg_data[0] = fruId;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
if (verbose > 1)
printf("no response\n");
return -1;
}
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf (" Timeout accessing FRU info. (Device not present?)\n");
else
printf (" CCODE = 0x%02x\n", rsp->ccode);
return -1;
}
pFruInfo->size = (rsp->data[1] << 8) | rsp->data[0];
pFruInfo->access = rsp->data[2] & 0x1;
if (verbose > 1)
printf("pFruInfo->size = %d bytes (accessed by %s)\n",
pFruInfo->size, pFruInfo->access ? "words" : "bytes");
if (!pFruInfo->size)
return -1;
msg_data[0] = fruId;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
return -1;
if (rsp->ccode) {
if (rsp->ccode == IPMI_CC_TIMEOUT)
printf (" Timeout while reading FRU data. (Device not present?)\n");
return -1;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 0x01) {
printf (" Unknown FRU header version %02x.\n", header.version);
return -1;
}
end = pFruInfo->size;
/* Retrieve length */
if (((header.offset.internal * 8) > (header.offset.internal * 8)) &&
((header.offset.internal * 8) < end))
end = (header.offset.internal * 8);
if (((header.offset.chassis * 8) > (header.offset.chassis * 8)) &&
((header.offset.chassis * 8) < end))
end = (header.offset.chassis * 8);
if (((header.offset.board * 8) > (header.offset.board * 8)) &&
((header.offset.board * 8) < end))
end = (header.offset.board * 8);
if (((header.offset.product * 8) > (header.offset.product * 8)) &&
((header.offset.product * 8) < end))
end = (header.offset.product * 8);
*pRetSize = end;
*pRetLocation = 8 * header.offset.multi;
return 0;
}
/* ipmi_fru_get_internal_use_offset - Retrieve internal use offset
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_get_internal_use_info( struct ipmi_intf * intf,
uint8_t id,
struct fru_info * fru,
uint16_t * size,
uint16_t * offset)
{
struct ipmi_rs * rsp;
struct ipmi_rq req;
struct fru_header header;
uint8_t msg_data[4];
// Init output value
* offset = 0;
* size = 0;
memset(fru, 0, sizeof(struct fru_info));
memset(&header, 0, sizeof(struct fru_header));
/*
* get info about this FRU
*/
memset(msg_data, 0, 4);
msg_data[0] = id;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return -1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return -1;
}
fru->size = (rsp->data[1] << 8) | rsp->data[0];
fru->access = rsp->data[2] & 0x1;
lprintf(LOG_DEBUG, "fru.size = %d bytes (accessed by %s)",
fru->size, fru->access ? "words" : "bytes");
if (fru->size < 1) {
lprintf(LOG_ERR, " Invalid FRU size %d", fru->size);
return -1;
}
/*
* retrieve the FRU header
*/
msg_data[0] = id;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
return 1;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
return 1;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 1) {
lprintf(LOG_ERR, " Unknown FRU header version 0x%02x",
header.version);
return -1;
}
lprintf(LOG_DEBUG, "fru.header.version: 0x%x",
header.version);
lprintf(LOG_DEBUG, "fru.header.offset.internal: 0x%x",
header.offset.internal * 8);
lprintf(LOG_DEBUG, "fru.header.offset.chassis: 0x%x",
header.offset.chassis * 8);
lprintf(LOG_DEBUG, "fru.header.offset.board: 0x%x",
header.offset.board * 8);
lprintf(LOG_DEBUG, "fru.header.offset.product: 0x%x",
header.offset.product * 8);
lprintf(LOG_DEBUG, "fru.header.offset.multi: 0x%x",
header.offset.multi * 8);
if((header.offset.internal*8) == 0)
{
* size = 0;
* offset = 0;
}
else
{
(* offset) = (header.offset.internal*8);
if(header.offset.chassis != 0)
{
(* size) = ((header.offset.chassis*8)-(* offset));
}
else if(header.offset.board != 0)
{
(* size) = ((header.offset.board*8)-(* offset));
}
else if(header.offset.product != 0)
{
(* size) = ((header.offset.product*8)-(* offset));
}
else if(header.offset.multi != 0)
{
(* size) = ((header.offset.multi*8)-(* offset));
}
else
{
(* size) = (fru->size - (* offset));
}
}
return 0;
}
/* ipmi_fru_info_internal_use - print internal use info
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_info_internal_use(struct ipmi_intf * intf, uint8_t id)
{
struct fru_info fru;
uint16_t size;
uint16_t offset;
int rc = 0;
rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset);
if(rc == 0)
{
lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset);
printf( "Internal Use Area Size : %i\n", size);
}
else
{
lprintf(LOG_ERR, "Cannot access internal use area");
return -1;
}
return 0;
}
/* ipmi_fru_help - print help text for FRU subcommand
*
* returns void
*/
void
ipmi_fru_help()
{
lprintf(LOG_NOTICE,
"FRU Commands: print read write upgEkey edit internaluse get");
} /* ipmi_fru_help() */
/* ipmi_fru_read_internal_use - print internal use are in hex or file
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_read_internal_use(struct ipmi_intf * intf, uint8_t id, char * pFileName)
{
struct fru_info fru;
uint16_t size;
uint16_t offset;
int rc = 0;
rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset);
if(rc == 0)
{
uint8_t * frubuf;
lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset);
printf( "Internal Use Area Size : %i\n", size);
frubuf = malloc( size );
if(frubuf)
{
rc = read_fru_area_section(intf, &fru, id, offset, size, frubuf);
if(rc == 0)
{
if(!pFileName)
{
uint16_t counter;
for(counter = 0; counter < size; counter ++)
{
if((counter % 16) == 0)
printf("\n%02i- ", (counter / 16));
printf("%02X ", frubuf[counter]);
}
}
else
{
FILE * pFile;
pFile = fopen(pFileName,"wb");
if (pFile)
{
fwrite(frubuf, size, 1, pFile);
printf("Done\n");
}
else
{
lprintf(LOG_ERR, "Error opening file %s\n", pFileName);
free_n(&frubuf);
return -1;
}
fclose(pFile);
}
}
printf("\n");
free_n(&frubuf);
}
}
else
{
lprintf(LOG_ERR, "Cannot access internal use area");
}
return 0;
}
/* ipmi_fru_write_internal_use - print internal use are in hex or file
*
* @intf: ipmi interface
* @id: fru id
*
* returns -1 on error
* returns 0 if successful
* returns 1 if device not present
*/
static int
ipmi_fru_write_internal_use(struct ipmi_intf * intf, uint8_t id, char * pFileName)
{
struct fru_info fru;
uint16_t size;
uint16_t offset;
int rc = 0;
rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset);
if(rc == 0)
{
uint8_t * frubuf;
FILE * fp;
uint32_t fileLength = 0;
lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset);
printf( "Internal Use Area Size : %i\n", size);
fp = fopen(pFileName, "r");
if(fp)
{
/* Retrieve file length, check if it's fits the Eeprom Size */
fseek(fp, 0 ,SEEK_END);
fileLength = ftell(fp);
lprintf(LOG_ERR, "File Size: %i", fileLength);
lprintf(LOG_ERR, "Area Size: %i", size);
if(fileLength != size)
{
lprintf(LOG_ERR, "File size does not fit Eeprom Size");
fclose(fp);
fp = NULL;
}
else
{
fseek(fp, 0 ,SEEK_SET);
}
}
if(fp)
{
frubuf = malloc( size );
if(frubuf)
{
uint16_t fru_read_size;
fru_read_size = fread(frubuf, 1, size, fp);
if(fru_read_size == size)
{
rc = write_fru_area(intf, &fru, id, 0, offset, size, frubuf);
if(rc == 0)
{
lprintf(LOG_INFO, "Done\n");
}
}
else
{
lprintf(LOG_ERR, "Unable to read file: %i\n", fru_read_size);
}
free_n(&frubuf);
}
fclose(fp);
fp = NULL;
}
}
else
{
lprintf(LOG_ERR, "Cannot access internal use area");
}
return 0;
}
int
ipmi_fru_main(struct ipmi_intf * intf, int argc, char ** argv)
{
int rc = 0;
uint8_t fru_id = 0;
if (argc < 1) {
rc = ipmi_fru_print_all(intf);
}
else if (strncmp(argv[0], "help", 4) == 0) {
ipmi_fru_help();
return 0;
}
else if (strncmp(argv[0], "print", 5) == 0 ||
strncmp(argv[0], "list", 4) == 0) {
if (argc > 1) {
if (strcmp(argv[1], "help") == 0) {
lprintf(LOG_NOTICE, "fru print [fru id] - print information about FRU(s)");
return 0;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
rc = __ipmi_fru_print(intf, fru_id);
} else {
rc = ipmi_fru_print_all(intf);
}
}
else if (!strncmp(argv[0], "read", 5)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_read_help();
return 0;
} else if (argc < 3) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_read_help();
return -1;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[2]) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
printf("FRU File : %s\n", argv[2]);
}
/* TODO - rc is missing */
ipmi_fru_read_to_bin(intf, argv[2], fru_id);
}
else if (!strncmp(argv[0], "write", 5)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_write_help();
return 0;
} else if (argc < 3) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_write_help();
return -1;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[2]) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
printf("FRU File : %s\n", argv[2]);
}
/* TODO - rc is missing */
ipmi_fru_write_from_bin(intf, argv[2], fru_id);
}
else if (!strncmp(argv[0], "upgEkey", 7)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_upgekey_help();
return 0;
} else if (argc < 3) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_upgekey_help();
return -1;
}
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[2]) != 0)
return -1;
rc = ipmi_fru_upg_ekeying(intf, argv[2], fru_id);
}
else if (!strncmp(argv[0], "internaluse", 11)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_internaluse_help();
return 0;
}
if ( (argc >= 3) && (!strncmp(argv[2], "info", 4)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
rc = ipmi_fru_info_internal_use(intf, fru_id);
}
else if ( (argc >= 3) && (!strncmp(argv[2], "print", 5)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
rc = ipmi_fru_read_internal_use(intf, fru_id, NULL);
}
else if ( (argc >= 4) && (!strncmp(argv[2], "read", 4)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[3]) != 0)
return -1;
lprintf(LOG_DEBUG, "FRU ID : %d", fru_id);
lprintf(LOG_DEBUG, "FRU File : %s", argv[3]);
rc = ipmi_fru_read_internal_use(intf, fru_id, argv[3]);
}
else if ( (argc >= 4) && (!strncmp(argv[2], "write", 5)) ) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
/* There is a file name in the parameters */
if (is_valid_filename(argv[3]) != 0)
return -1;
lprintf(LOG_DEBUG, "FRU ID : %d", fru_id);
lprintf(LOG_DEBUG, "FRU File : %s", argv[3]);
rc = ipmi_fru_write_internal_use(intf, fru_id, argv[3]);
} else {
lprintf(LOG_ERR,
"Either unknown command or not enough parameters given.");
ipmi_fru_internaluse_help();
return -1;
}
}
else if (!strncmp(argv[0], "edit", 4)) {
if (argc > 1 && strcmp(argv[1], "help") == 0) {
ipmi_fru_edit_help();
return 0;
} else if (argc < 2) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_edit_help();
return -1;
}
if (argc >= 2) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
}
} else {
printf("Using default FRU ID: %d\n", fru_id);
}
if (argc >= 3) {
if (!strncmp(argv[2], "field", 5)) {
if (argc != 6) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_edit_help();
return -1;
}
rc = ipmi_fru_set_field_string(intf, fru_id, *argv[3], *argv[4],
(char *) argv[5]);
} else if (!strncmp(argv[2], "oem", 3)) {
rc = ipmi_fru_edit_multirec(intf, fru_id, argc, argv);
} else {
lprintf(LOG_ERR, "Invalid command: %s", argv[2]);
ipmi_fru_edit_help();
return -1;
}
} else {
rc = ipmi_fru_edit_multirec(intf, fru_id, argc, argv);
}
}
else if (!strncmp(argv[0], "get", 4)) {
if (argc > 1 && (strncmp(argv[1], "help", 4) == 0)) {
ipmi_fru_get_help();
return 0;
} else if (argc < 2) {
lprintf(LOG_ERR, "Not enough parameters given.");
ipmi_fru_get_help();
return -1;
}
if (argc >= 2) {
if (is_fru_id(argv[1], &fru_id) != 0)
return -1;
if (verbose) {
printf("FRU ID : %d\n", fru_id);
}
} else {
printf("Using default FRU ID: %d\n", fru_id);
}
if (argc >= 3) {
if (!strncmp(argv[2], "oem", 3)) {
rc = ipmi_fru_get_multirec(intf, fru_id, argc, argv);
} else {
lprintf(LOG_ERR, "Invalid command: %s", argv[2]);
ipmi_fru_get_help();
return -1;
}
} else {
rc = ipmi_fru_get_multirec(intf, fru_id, argc, argv);
}
}
else {
lprintf(LOG_ERR, "Invalid FRU command: %s", argv[0]);
ipmi_fru_help();
return -1;
}
return rc;
}
/* ipmi_fru_set_field_string - Set a field string to a new value, Need to be the same size. If
* size if not equal, the function ipmi_fru_set_field_string_rebuild
* will be called.
*
* @intf: ipmi interface
* @id: fru id
* @f_type: Type of the Field : c=Chassis b=Board p=Product
* @f_index: findex of the field, zero indexed.
* @f_string: NULL terminated string
*
* returns -1 on error
* returns 1 if successful
*/
static int
ipmi_fru_set_field_string(struct ipmi_intf * intf, uint8_t fruId, uint8_t
f_type, uint8_t f_index, char *f_string)
{
struct ipmi_rs *rsp;
struct ipmi_rq req;
struct fru_info fru;
struct fru_header header;
uint8_t msg_data[4];
uint8_t checksum;
int i = 0;
int rc = 1;
uint8_t *fru_data = NULL;
uint8_t *fru_area = NULL;
uint32_t fru_field_offset, fru_field_offset_tmp;
uint32_t fru_section_len, header_offset;
memset(msg_data, 0, 4);
msg_data[0] = fruId;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_INFO;
req.msg.data = msg_data;
req.msg.data_len = 1;
rsp = intf->sendrecv(intf, &req);
if (!rsp) {
printf(" Device not present (No Response)\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if (rsp->ccode) {
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
rc = -1;
goto ipmi_fru_set_field_string_out;
}
memset(&fru, 0, sizeof(fru));
fru.size = (rsp->data[1] << 8) | rsp->data[0];
fru.access = rsp->data[2] & 0x1;
if (fru.size < 1) {
printf(" Invalid FRU size %d", fru.size);
rc = -1;
goto ipmi_fru_set_field_string_out;
}
/*
* retrieve the FRU header
*/
msg_data[0] = fruId;
msg_data[1] = 0;
msg_data[2] = 0;
msg_data[3] = 8;
memset(&req, 0, sizeof(req));
req.msg.netfn = IPMI_NETFN_STORAGE;
req.msg.cmd = GET_FRU_DATA;
req.msg.data = msg_data;
req.msg.data_len = 4;
rsp = intf->sendrecv(intf, &req);
if (!rsp)
{
printf(" Device not present (No Response)\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if (rsp->ccode)
{
printf(" Device not present (%s)\n",
val2str(rsp->ccode, completion_code_vals));
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if (verbose > 1)
printbuf(rsp->data, rsp->data_len, "FRU DATA");
memcpy(&header, rsp->data + 1, 8);
if (header.version != 1) {
printf(" Unknown FRU header version 0x%02x",
header.version);
rc = -1;
goto ipmi_fru_set_field_string_out;
}
fru_data = malloc( fru.size );
if (!fru_data) {
printf("Out of memory!\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
/* Setup offset from the field type */
/* Chassis type field */
if (f_type == 'c' ) {
header_offset = (header.offset.chassis * 8);
read_fru_area(intf ,&fru, fruId, header_offset , 3 , fru_data);
fru_field_offset = 3;
fru_section_len = *(fru_data + 1) * 8;
}
/* Board type field */
else if (f_type == 'b' ) {
header_offset = (header.offset.board * 8);
read_fru_area(intf ,&fru, fruId, header_offset , 3 , fru_data);
fru_field_offset = 6;
fru_section_len = *(fru_data + 1) * 8;
}
/* Product type field */
else if (f_type == 'p' ) {
header_offset = (header.offset.product * 8);
read_fru_area(intf ,&fru, fruId, header_offset , 3 , fru_data);
fru_field_offset = 3;
fru_section_len = *(fru_data + 1) * 8;
}
else
{
printf("Wrong field type.");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
memset(fru_data, 0, fru.size);
if( read_fru_area(intf ,&fru, fruId, header_offset ,
fru_section_len , fru_data) < 0 )
{
rc = -1;
goto ipmi_fru_set_field_string_out;
}
/* Convert index from character to decimal */
f_index= f_index - 0x30;
/*Seek to field index */
for (i=0; i <= f_index; i++) {
fru_field_offset_tmp = fru_field_offset;
if (fru_area) {
free_n(&fru_area);
}
fru_area = (uint8_t *) get_fru_area_str(fru_data, &fru_field_offset);
}
if (!FRU_FIELD_VALID(fru_area)) {
printf("Field not found !\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
if ( strlen((const char *)fru_area) == strlen((const char *)f_string) )
{
printf("Updating Field '%s' with '%s' ...\n", fru_area, f_string );
memcpy(fru_data + fru_field_offset_tmp + 1,
f_string, strlen(f_string));
checksum = 0;
/* Calculate Header Checksum */
for (i = 0; i < fru_section_len - 1; i++)
{
checksum += fru_data[i];
}
checksum = (~checksum) + 1;
fru_data[fru_section_len - 1] = checksum;
/* Write the updated section to the FRU data; source offset => 0 */
if( write_fru_area(intf, &fru, fruId, 0,
header_offset, fru_section_len, fru_data) < 0 )
{
printf("Write to FRU data failed.\n");
rc = -1;
goto ipmi_fru_set_field_string_out;
}
}
else {
printf("String size are not equal, resizing fru to fit new string\n");
if(
ipmi_fru_set_field_string_rebuild(intf,fruId,fru,header,f_type,f_index,f_string)
)
{
rc = -1;
goto ipmi_fru_set_field_string_out;
}
}
ipmi_fru_set_field_string_out:
free_n(&fru_data);
free_n(&fru_area);
return rc;
}
/*
This function can update a string within of the following section when the size is not equal:
Chassis
Product
Board
*/
/* ipmi_fru_set_field_string_rebuild - Set a field string to a new value, When size are not
* the same size.
*
* This function can update a string within of the following section when the size is not equal:
*
* - Chassis
* - Product
* - Board
*
* @intf: ipmi interface
* @fruId: fru id
* @fru: info about fru
* @header: contain the header of the FRU
* @f_type: Type of the Field : c=Chassis b=Board p=Product
* @f_index: findex of the field, zero indexed.
* @f_string: NULL terminated string
*
* returns -1 on error
* returns 1 if successful
*/
#define DBG_RESIZE_FRU
static int
ipmi_fru_set_field_string_rebuild(struct ipmi_intf * intf, uint8_t fruId,
struct fru_info fru, struct fru_header header,
uint8_t f_type, uint8_t f_index, char *f_string)
{
int i = 0;
uint8_t *fru_data_old = NULL;
uint8_t *fru_data_new = NULL;
uint8_t *fru_area = NULL;
uint32_t fru_field_offset, fru_field_offset_tmp;
uint32_t fru_section_len, header_offset;
uint32_t chassis_offset, board_offset, product_offset;
uint32_t chassis_len, board_len, product_len, product_len_new;
int num_byte_change = 0, padding_len = 0;
uint32_t counter;
unsigned char cksum;
int rc = 1;
fru_data_old = calloc( fru.size, sizeof(uint8_t) );
fru_data_new = malloc( fru.size );
if (!fru_data_old || !fru_data_new) {
printf("Out of memory!\n");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
/*************************
1) Read ALL FRU */
printf("Read All FRU area\n");
printf("Fru Size : %u bytes\n", fru.size);
/* Read current fru data */
read_fru_area(intf ,&fru, fruId, 0, fru.size , fru_data_old);
#ifdef DBG_RESIZE_FRU
printf("Copy to new FRU\n");
#endif
/*************************
2) Copy all FRU to new FRU */
memcpy(fru_data_new, fru_data_old, fru.size);
/* Build location of all modifiable components */
chassis_offset = (header.offset.chassis * 8);
board_offset = (header.offset.board * 8);
product_offset = (header.offset.product * 8);
/* Retrieve length of all modifiable components */
chassis_len = *(fru_data_old + chassis_offset + 1) * 8;
board_len = *(fru_data_old + board_offset + 1) * 8;
product_len = *(fru_data_old + product_offset + 1) * 8;
product_len_new = product_len;
/* Chassis type field */
if (f_type == 'c' )
{
header_offset = chassis_offset;
fru_field_offset = chassis_offset + 3;
fru_section_len = chassis_len;
}
/* Board type field */
else if (f_type == 'b' )
{
header_offset = board_offset;
fru_field_offset = board_offset + 6;
fru_section_len = board_len;
}
/* Product type field */
else if (f_type == 'p' )
{
header_offset = product_offset;
fru_field_offset = product_offset + 3;
fru_section_len = product_len;
}
else
{
printf("Wrong field type.");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
/*************************
3) Seek to field index */
for (i = 0;i <= f_index; i++) {
fru_field_offset_tmp = fru_field_offset;
free_n(&fru_area);
fru_area = (uint8_t *) get_fru_area_str(fru_data_old, &fru_field_offset);
}
if (!FRU_FIELD_VALID(fru_area)) {
printf("Field not found (1)!\n");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
#ifdef DBG_RESIZE_FRU
printf("Section Length: %u\n", fru_section_len);
#endif
/*************************
4) Check number of padding bytes and bytes changed */
for(counter = 2; counter < fru_section_len; counter ++)
{
if(*(fru_data_old + (header_offset + fru_section_len - counter)) == 0)
padding_len ++;
else
break;
}
num_byte_change = strlen(f_string) - strlen(fru_area);
#ifdef DBG_RESIZE_FRU
printf("Padding Length: %u\n", padding_len);
printf("NumByte Change: %i\n", num_byte_change);
printf("Start SecChnge: %x\n", *(fru_data_old + fru_field_offset_tmp));
printf("End SecChnge : %x\n", *(fru_data_old + fru_field_offset_tmp + strlen(f_string) + 1));
printf("Start Section : %x\n", *(fru_data_old + header_offset));
printf("End Sec wo Pad: %x\n", *(fru_data_old + header_offset + fru_section_len - 2 - padding_len));
printf("End Section : %x\n", *(fru_data_old + header_offset + fru_section_len - 1));
#endif
/* Calculate New Padding Length */
padding_len -= num_byte_change;
#ifdef DBG_RESIZE_FRU
printf("New Padding Length: %i\n", padding_len);
#endif
/*************************
5) Check if section must be resize. This occur when padding length is not between 0 and 7 */
if( (padding_len < 0) || (padding_len >= 8))
{
uint32_t remaining_offset = ((header.offset.product * 8) + product_len);
int change_size_by_8;
if(padding_len >= 8)
{
/* Section must be set smaller */
change_size_by_8 = ((padding_len) / 8) * (-1);
}
else
{
/* Section must be set bigger */
change_size_by_8 = 1 + (((padding_len+1) / 8) * (-1));
}
/* Recalculate padding and section length base on the section changes */
fru_section_len += (change_size_by_8 * 8);
padding_len += (change_size_by_8 * 8);
#ifdef DBG_RESIZE_FRU
printf("change_size_by_8: %i\n", change_size_by_8);
printf("New Padding Length: %i\n", padding_len);
printf("change_size_by_8: %i\n", change_size_by_8);
printf("header.offset.board: %i\n", header.offset.board);
#endif
/* Must move sections */
/* Section that can be modified are as follow
Chassis
Board
product */
/* Chassis type field */
if (f_type == 'c' )
{
printf("Moving Section Chassis, from %i to %i\n",
((header.offset.board) * 8),
((header.offset.board + change_size_by_8) * 8)
);
memcpy(
(fru_data_new + ((header.offset.board + change_size_by_8) * 8)),
(fru_data_old + (header.offset.board) * 8),
board_len
);
header.offset.board += change_size_by_8;
}
/* Board type field */
if ((f_type == 'c' ) || (f_type == 'b' ))
{
printf("Moving Section Product, from %i to %i\n",
((header.offset.product) * 8),
((header.offset.product + change_size_by_8) * 8)
);
memcpy(
(fru_data_new + ((header.offset.product + change_size_by_8) * 8)),
(fru_data_old + (header.offset.product) * 8),
product_len
);
header.offset.product += change_size_by_8;
}
if ((f_type == 'c' ) || (f_type == 'b' ) || (f_type == 'p' )) {
printf("Change multi offset from %d to %d\n", header.offset.multi, header.offset.multi + change_size_by_8);
header.offset.multi += change_size_by_8;
}
/* Adjust length of the section */
if (f_type == 'c')
{
*(fru_data_new + chassis_offset + 1) += change_size_by_8;
}
else if( f_type == 'b')
{
*(fru_data_new + board_offset + 1) += change_size_by_8;
}
else if( f_type == 'p')
{
*(fru_data_new + product_offset + 1) += change_size_by_8;
product_len_new = *(fru_data_new + product_offset + 1) * 8;
}
/* Rebuild Header checksum */
{
unsigned char * pfru_header = (unsigned char *) &header;
header.checksum = 0;
for(counter = 0; counter < (sizeof(struct fru_header) -1); counter ++)
{
header.checksum += pfru_header[counter];
}
header.checksum = (0 - header.checksum);
memcpy(fru_data_new, pfru_header, sizeof(struct fru_header));
}
/* Move remaining sections in 1 copy */
printf("Moving Remaining Bytes (Multi-Rec , etc..), from %i to %i\n",
remaining_offset,
((header.offset.product) * 8) + product_len_new
);
if(((header.offset.product * 8) + product_len_new - remaining_offset) < 0)
{
memcpy(
fru_data_new + (header.offset.product * 8) + product_len_new,
fru_data_old + remaining_offset,
fru.size - remaining_offset
);
}
else
{
memcpy(
fru_data_new + (header.offset.product * 8) + product_len_new,
fru_data_old + remaining_offset,
fru.size - ((header.offset.product * 8) + product_len_new)
);
}
}
/* Update only if it's fits padding length as defined in the spec, otherwise, it's an internal
error */
/*************************
6) Update Field and sections */
if( (padding_len >=0) && (padding_len < 8))
{
/* Do not requires any change in other section */
/* Change field length */
printf(
"Updating Field : '%s' with '%s' ... (Length from '%d' to '%d')\n",
fru_area, f_string,
(int)*(fru_data_old + fru_field_offset_tmp),
(int)(0xc0 + strlen(f_string)));
*(fru_data_new + fru_field_offset_tmp) = (0xc0 + strlen(f_string));
memcpy(fru_data_new + fru_field_offset_tmp + 1, f_string, strlen(f_string));
/* Copy remaining bytes in section */
#ifdef DBG_RESIZE_FRU
printf("Copying remaining of sections: %d \n",
(int)((fru_data_old + header_offset + fru_section_len - 1) -
(fru_data_old + fru_field_offset_tmp + strlen(f_string) + 1)));
#endif
memcpy((fru_data_new + fru_field_offset_tmp + 1 +
strlen(f_string)),
(fru_data_old + fru_field_offset_tmp + 1 +
strlen(fru_area)),
((fru_data_old + header_offset + fru_section_len - 1) -
(fru_data_old + fru_field_offset_tmp + strlen(f_string) + 1)));
/* Add Padding if required */
for(counter = 0; counter < padding_len; counter ++)
{
*(fru_data_new + header_offset + fru_section_len - 1 -
padding_len + counter) = 0;
}
/* Calculate New Checksum */
cksum = 0;
for( counter = 0; counter <fru_section_len-1; counter ++ )
{
cksum += *(fru_data_new + header_offset + counter);
}
*(fru_data_new + header_offset + fru_section_len - 1) = (0 - cksum);
#ifdef DBG_RESIZE_FRU
printf("Calculate New Checksum: %x\n", (0 - cksum));
#endif
}
else
{
printf( "Internal error, padding length %i (must be from 0 to 7) ", padding_len );
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
/*************************
7) Finally, write new FRU */
printf("Writing new FRU.\n");
if( write_fru_area( intf, &fru, fruId, 0, 0, fru.size, fru_data_new ) < 0 )
{
printf("Write to FRU data failed.\n");
rc = -1;
goto ipmi_fru_set_field_string_rebuild_out;
}
printf("Done.\n");
ipmi_fru_set_field_string_rebuild_out:
free_n(&fru_area);
free_n(&fru_data_new);
free_n(&fru_data_old);
return rc;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4522_0 |
crossvul-cpp_data_bad_4711_0 | /*
ettercap -- GTK+ GUI
Copyright (C) ALoR & NaGA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
$Id: ec_gtk_conf.c,v 1.2 2004/09/30 02:09:27 daten Exp $
*/
#include <ec.h>
#include <ec_gtk.h>
void gtkui_conf_set(char *name, short value);
short gtkui_conf_get(char *name);
void gtkui_conf_read(void);
void gtkui_conf_save(void);
static char *filename = NULL;
static struct gtk_conf_entry settings[] = {
{ "window_top", 0 },
{ "window_left", 0 },
{ "window_height", 440 },
{ "window_width", 600 },
{ NULL, 0 },
};
void gtkui_conf_set(char *name, short value) {
short c = 0;
DEBUG_MSG("gtkui_conf_set: name=%s value=%hu", name, value);
for(c = 0; settings[c].name != NULL; c++) {
if(!strcmp(name, settings[c].name)) {
settings[c].value = value;
break;
}
}
}
short gtkui_conf_get(char *name) {
unsigned short c = 0;
DEBUG_MSG("gtkui_conf_get: name=%s", name);
for(c = 0; settings[c].name != NULL; c++) {
if(!strcmp(name, settings[c].name))
return(settings[c].value);
}
return(0);
}
void gtkui_conf_read(void) {
FILE *fd;
const char *path;
char line[100], name[30];
short value;
#ifdef OS_WINDOWS
path = ec_win_get_user_dir();
#else
/* TODO: get the dopped privs home dir instead of "/root" */
/* path = g_get_home_dir(); */
path = g_get_tmp_dir();
#endif
filename = g_build_filename(path, ".ettercap_gtk", NULL);
DEBUG_MSG("gtkui_conf_read: %s", filename);
fd = fopen(filename, "r");
if(!fd)
return;
while(fgets(line, 100, fd)) {
sscanf(line, "%s = %hd", name, &value);
gtkui_conf_set(name, value);
}
fclose(fd);
}
void gtkui_conf_save(void) {
FILE *fd;
int c;
DEBUG_MSG("gtkui_conf_save");
if(!filename)
return;
fd = fopen(filename, "w");
if(fd != NULL) {
for(c = 0; settings[c].name != NULL; c++)
fprintf(fd, "%s = %hd\n", settings[c].name, settings[c].value);
fclose(fd);
}
free(filename);
filename = NULL;
}
/* EOF */
// vim:ts=3:expandtab
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4711_0 |
crossvul-cpp_data_bad_998_0 | /*
* Marvell Wireless LAN device driver: management IE handling- setting and
* deleting IE.
*
* Copyright (C) 2012-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
/* This function checks if current IE index is used by any on other interface.
* Return: -1: yes, current IE index is used by someone else.
* 0: no, current IE index is NOT used by other interface.
*/
static int
mwifiex_ie_index_used_by_other_intf(struct mwifiex_private *priv, u16 idx)
{
int i;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie *ie;
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i] != priv) {
ie = &adapter->priv[i]->mgmt_ie[idx];
if (ie->mgmt_subtype_mask && ie->ie_length)
return -1;
}
}
return 0;
}
/* Get unused IE index. This index will be used for setting new IE */
static int
mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
struct mwifiex_ie *ie, u16 *index)
{
u16 mask, len, i;
for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
len = le16_to_cpu(ie->ie_length);
if (mask == MWIFIEX_AUTO_IDX_MASK)
continue;
if (mask == subtype_mask) {
if (len > IEEE_MAX_IE_SIZE)
continue;
*index = i;
return 0;
}
if (!priv->mgmt_ie[i].ie_length) {
if (mwifiex_ie_index_used_by_other_intf(priv, i))
continue;
*index = i;
return 0;
}
}
return -1;
}
/* This function prepares IE data buffer for command to be sent to FW */
static int
mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
struct mwifiex_ie_list *ie_list)
{
u16 travel_len, index, mask;
s16 input_len, tlv_len;
struct mwifiex_ie *ie;
u8 *tmp;
input_len = le16_to_cpu(ie_list->len);
travel_len = sizeof(struct mwifiex_ie_types_header);
ie_list->len = 0;
while (input_len >= sizeof(struct mwifiex_ie_types_header)) {
ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
tlv_len = le16_to_cpu(ie->ie_length);
travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE;
if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE)
return -1;
index = le16_to_cpu(ie->ie_index);
mask = le16_to_cpu(ie->mgmt_subtype_mask);
if (index == MWIFIEX_AUTO_IDX_MASK) {
/* automatic addition */
if (mwifiex_ie_get_autoidx(priv, mask, ie, &index))
return -1;
if (index == MWIFIEX_AUTO_IDX_MASK)
return -1;
tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
priv->mgmt_ie[index].ie_length = ie->ie_length;
priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
priv->mgmt_ie[index].mgmt_subtype_mask =
cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(index);
} else {
if (mask != MWIFIEX_DELETE_MASK)
return -1;
/*
* Check if this index is being used on any
* other interface.
*/
if (mwifiex_ie_index_used_by_other_intf(priv, index))
return -1;
ie->ie_length = 0;
memcpy(&priv->mgmt_ie[index], ie,
sizeof(struct mwifiex_ie));
}
le16_unaligned_add_cpu(&ie_list->len,
le16_to_cpu(
priv->mgmt_ie[index].ie_length) +
MWIFIEX_IE_HDR_SIZE);
input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_CUSTOM_IE_I, ie_list, true);
return 0;
}
/* Copy individual custom IEs for beacon, probe response and assoc response
* and prepare single structure for IE setting.
* This function also updates allocated IE indices from driver.
*/
static int
mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
struct mwifiex_ie *beacon_ie, u16 *beacon_idx,
struct mwifiex_ie *pr_ie, u16 *probe_idx,
struct mwifiex_ie *ar_ie, u16 *assoc_idx)
{
struct mwifiex_ie_list *ap_custom_ie;
u8 *pos;
u16 len;
int ret;
ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
if (!ap_custom_ie)
return -ENOMEM;
ap_custom_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
pos = (u8 *)ap_custom_ie->ie_list;
if (beacon_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(beacon_ie->ie_length);
memcpy(pos, beacon_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (pr_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
memcpy(pos, pr_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (ar_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(ar_ie->ie_length);
memcpy(pos, ar_ie, len);
pos += len;
le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
pos = (u8 *)(&ap_custom_ie->ie_list[0].ie_index);
if (beacon_ie && *beacon_idx == MWIFIEX_AUTO_IDX_MASK) {
/* save beacon ie index after auto-indexing */
*beacon_idx = le16_to_cpu(ap_custom_ie->ie_list[0].ie_index);
len = sizeof(*beacon_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(beacon_ie->ie_length);
pos += len;
}
if (pr_ie && le16_to_cpu(pr_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK) {
/* save probe resp ie index after auto-indexing */
*probe_idx = *((u16 *)pos);
len = sizeof(*pr_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
pos += len;
}
if (ar_ie && le16_to_cpu(ar_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK)
/* save assoc resp ie index after auto-indexing */
*assoc_idx = *((u16 *)pos);
kfree(ap_custom_ie);
return ret;
}
/* This function checks if the vendor specified IE is present in passed buffer
* and copies it to mwifiex_ie structure.
* Function takes pointer to struct mwifiex_ie pointer as argument.
* If the vendor specified IE is present then memory is allocated for
* mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
* this memory.
*/
static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
struct mwifiex_ie **ie_ptr, u16 mask,
unsigned int oui, u8 oui_type)
{
struct ieee_types_header *vs_ie;
struct mwifiex_ie *ie = *ie_ptr;
const u8 *vendor_ie;
vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
if (vendor_ie) {
if (!*ie_ptr) {
*ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
GFP_KERNEL);
if (!*ie_ptr)
return -ENOMEM;
ie = *ie_ptr;
}
vs_ie = (struct ieee_types_header *)vendor_ie;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
ie->mgmt_subtype_mask = cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
*ie_ptr = ie;
return 0;
}
/* This function parses beacon IEs, probe response IEs, association response IEs
* from cfg80211_ap_settings->beacon and sets these IE to FW.
*/
static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL, *ar_ie = NULL;
u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
int ret = 0;
if (data->beacon_ies && data->beacon_ies_len) {
mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
&beacon_ie, MGMT_MASK_BEACON,
WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
&beacon_ie, MGMT_MASK_BEACON,
WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
}
if (data->proberesp_ies && data->proberesp_ies_len) {
mwifiex_update_vs_ie(data->proberesp_ies,
data->proberesp_ies_len, &pr_ie,
MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->proberesp_ies,
data->proberesp_ies_len, &pr_ie,
MGMT_MASK_PROBE_RESP,
WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
}
if (data->assocresp_ies && data->assocresp_ies_len) {
mwifiex_update_vs_ie(data->assocresp_ies,
data->assocresp_ies_len, &ar_ie,
MGMT_MASK_ASSOC_RESP |
MGMT_MASK_REASSOC_RESP,
WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPS);
mwifiex_update_vs_ie(data->assocresp_ies,
data->assocresp_ies_len, &ar_ie,
MGMT_MASK_ASSOC_RESP |
MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
WLAN_OUI_TYPE_WFA_P2P);
}
if (beacon_ie || pr_ie || ar_ie) {
ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
&beacon_idx, pr_ie,
&pr_idx, ar_ie, &ar_idx);
if (ret)
goto done;
}
priv->beacon_idx = beacon_idx;
priv->proberesp_idx = pr_idx;
priv->assocresp_idx = ar_idx;
done:
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
return ret;
}
/* This function parses head and tail IEs, from cfg80211_beacon_data and sets
* these IE to FW.
*/
static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *info)
{
struct mwifiex_ie *gen_ie;
struct ieee_types_header *hdr;
struct ieee80211_vendor_ie *vendorhdr;
u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
int left_len, parsed_len = 0;
unsigned int token_len;
int err = 0;
if (!info->tail || !info->tail_len)
return 0;
gen_ie = kzalloc(sizeof(*gen_ie), GFP_KERNEL);
if (!gen_ie)
return -ENOMEM;
left_len = info->tail_len;
/* Many IEs are generated in FW by parsing bss configuration.
* Let's not add them here; else we may end up duplicating these IEs
*/
while (left_len > sizeof(struct ieee_types_header)) {
hdr = (void *)(info->tail + parsed_len);
token_len = hdr->len + sizeof(struct ieee_types_header);
if (token_len > left_len) {
err = -EINVAL;
goto out;
}
switch (hdr->element_id) {
case WLAN_EID_SSID:
case WLAN_EID_SUPP_RATES:
case WLAN_EID_COUNTRY:
case WLAN_EID_PWR_CONSTRAINT:
case WLAN_EID_ERP_INFO:
case WLAN_EID_EXT_SUPP_RATES:
case WLAN_EID_HT_CAPABILITY:
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
break;
case WLAN_EID_VENDOR_SPECIFIC:
/* Skip only Microsoft WMM IE */
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
(const u8 *)hdr,
token_len))
break;
/* fall through */
default:
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
goto out;
}
memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
ie_len += token_len;
break;
}
left_len -= token_len;
parsed_len += token_len;
}
/* parse only WPA vendor IE from tail, WMM IE is configured by
* bss_config command
*/
vendorhdr = (void *)cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
info->tail, info->tail_len);
if (vendorhdr) {
token_len = vendorhdr->len + sizeof(struct ieee_types_header);
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
goto out;
}
memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
ie_len += token_len;
}
if (!ie_len)
goto out;
gen_ie->ie_index = cpu_to_le16(gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
MGMT_MASK_PROBE_RESP |
MGMT_MASK_ASSOC_RESP);
gen_ie->ie_length = cpu_to_le16(ie_len);
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
NULL, NULL)) {
err = -EINVAL;
goto out;
}
priv->gen_idx = gen_idx;
out:
kfree(gen_ie);
return err;
}
/* This function parses different IEs-head & tail IEs, beacon IEs,
* probe response IEs, association response IEs from cfg80211_ap_settings
* function and sets these IE to FW.
*/
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *info)
{
int ret;
ret = mwifiex_uap_parse_tail_ies(priv, info);
if (ret)
return ret;
return mwifiex_set_mgmt_beacon_data_ies(priv, info);
}
/* This function removes management IE set */
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
struct mwifiex_ie *ar_ie = NULL, *gen_ie = NULL;
int ret = 0;
if (priv->gen_idx != MWIFIEX_AUTO_IDX_MASK) {
gen_ie = kmalloc(sizeof(*gen_ie), GFP_KERNEL);
if (!gen_ie)
return -ENOMEM;
gen_ie->ie_index = cpu_to_le16(priv->gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
gen_ie->ie_length = 0;
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &priv->gen_idx,
NULL, &priv->proberesp_idx,
NULL, &priv->assocresp_idx)) {
ret = -1;
goto done;
}
priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
}
if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!beacon_ie) {
ret = -ENOMEM;
goto done;
}
beacon_ie->ie_index = cpu_to_le16(priv->beacon_idx);
beacon_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
beacon_ie->ie_length = 0;
}
if (priv->proberesp_idx != MWIFIEX_AUTO_IDX_MASK) {
pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!pr_ie) {
ret = -ENOMEM;
goto done;
}
pr_ie->ie_index = cpu_to_le16(priv->proberesp_idx);
pr_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
pr_ie->ie_length = 0;
}
if (priv->assocresp_idx != MWIFIEX_AUTO_IDX_MASK) {
ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
if (!ar_ie) {
ret = -ENOMEM;
goto done;
}
ar_ie->ie_index = cpu_to_le16(priv->assocresp_idx);
ar_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
ar_ie->ie_length = 0;
}
if (beacon_ie || pr_ie || ar_ie)
ret = mwifiex_update_uap_custom_ie(priv,
beacon_ie, &priv->beacon_idx,
pr_ie, &priv->proberesp_idx,
ar_ie, &priv->assocresp_idx);
done:
kfree(gen_ie);
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_998_0 |
crossvul-cpp_data_good_3871_3 | /*
* The Python Imaging Library.
* $Id: //modules/pil/libImaging/TiffDecode.c#1 $
*
* LibTiff-based Group3 and Group4 decoder
*
*
* started modding to use non-private tiff functions to port to libtiff 4.x
* eds 3/12/12
*
*/
#include "Imaging.h"
#ifdef HAVE_LIBTIFF
#ifndef uint
#define uint uint32
#endif
#include "TiffDecode.h"
void dump_state(const TIFFSTATE *state){
TRACE(("State: Location %u size %d eof %d data: %p ifd: %d\n", (uint)state->loc,
(int)state->size, (uint)state->eof, state->data, state->ifd));
}
/*
procs for TIFFOpenClient
*/
tsize_t _tiffReadProc(thandle_t hdata, tdata_t buf, tsize_t size) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
tsize_t to_read;
TRACE(("_tiffReadProc: %d \n", (int)size));
dump_state(state);
to_read = min(size, min(state->size, (tsize_t)state->eof) - (tsize_t)state->loc);
TRACE(("to_read: %d\n", (int)to_read));
_TIFFmemcpy(buf, (UINT8 *)state->data + state->loc, to_read);
state->loc += (toff_t)to_read;
TRACE( ("location: %u\n", (uint)state->loc));
return to_read;
}
tsize_t _tiffWriteProc(thandle_t hdata, tdata_t buf, tsize_t size) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
tsize_t to_write;
TRACE(("_tiffWriteProc: %d \n", (int)size));
dump_state(state);
to_write = min(size, state->size - (tsize_t)state->loc);
if (state->flrealloc && size>to_write) {
tdata_t new_data;
tsize_t newsize=state->size;
while (newsize < (size + state->size)) {
if (newsize > INT_MAX - 64*1024){
return 0;
}
newsize += 64*1024;
// newsize*=2; // UNDONE, by 64k chunks?
}
TRACE(("Reallocing in write to %d bytes\n", (int)newsize));
/* malloc check ok, overflow checked above */
new_data = realloc(state->data, newsize);
if (!new_data) {
// fail out
return 0;
}
state->data = new_data;
state->size = newsize;
to_write = size;
}
TRACE(("to_write: %d\n", (int)to_write));
_TIFFmemcpy((UINT8 *)state->data + state->loc, buf, to_write);
state->loc += (toff_t)to_write;
state->eof = max(state->loc, state->eof);
dump_state(state);
return to_write;
}
toff_t _tiffSeekProc(thandle_t hdata, toff_t off, int whence) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
TRACE(("_tiffSeekProc: off: %u whence: %d \n", (uint)off, whence));
dump_state(state);
switch (whence) {
case 0:
state->loc = off;
break;
case 1:
state->loc += off;
break;
case 2:
state->loc = state->eof + off;
break;
}
dump_state(state);
return state->loc;
}
int _tiffCloseProc(thandle_t hdata) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
TRACE(("_tiffCloseProc \n"));
dump_state(state);
return 0;
}
toff_t _tiffSizeProc(thandle_t hdata) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
TRACE(("_tiffSizeProc \n"));
dump_state(state);
return (toff_t)state->size;
}
int _tiffMapProc(thandle_t hdata, tdata_t* pbase, toff_t* psize) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
TRACE(("_tiffMapProc input size: %u, data: %p\n", (uint)*psize, *pbase));
dump_state(state);
*pbase = state->data;
*psize = state->size;
TRACE(("_tiffMapProc returning size: %u, data: %p\n", (uint)*psize, *pbase));
return (1);
}
int _tiffNullMapProc(thandle_t hdata, tdata_t* pbase, toff_t* psize) {
(void) hdata; (void) pbase; (void) psize;
return (0);
}
void _tiffUnmapProc(thandle_t hdata, tdata_t base, toff_t size) {
TRACE(("_tiffUnMapProc\n"));
(void) hdata; (void) base; (void) size;
}
int ImagingLibTiffInit(ImagingCodecState state, int fp, uint32 offset) {
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
TRACE(("initing libtiff\n"));
TRACE(("filepointer: %d \n", fp));
TRACE(("State: count %d, state %d, x %d, y %d, ystep %d\n", state->count, state->state,
state->x, state->y, state->ystep));
TRACE(("State: xsize %d, ysize %d, xoff %d, yoff %d \n", state->xsize, state->ysize,
state->xoff, state->yoff));
TRACE(("State: bits %d, bytes %d \n", state->bits, state->bytes));
TRACE(("State: context %p \n", state->context));
clientstate->loc = 0;
clientstate->size = 0;
clientstate->data = 0;
clientstate->fp = fp;
clientstate->ifd = offset;
clientstate->eof = 0;
return 1;
}
int ReadTile(TIFF* tiff, UINT32 col, UINT32 row, UINT32* buffer) {
uint16 photometric = 0;
TIFFGetField(tiff, TIFFTAG_PHOTOMETRIC, &photometric);
// To avoid dealing with YCbCr subsampling, let libtiff handle it
if (photometric == PHOTOMETRIC_YCBCR) {
UINT32 tile_width, tile_height, swap_line_size, i_row;
UINT32* swap_line;
TIFFGetField(tiff, TIFFTAG_TILEWIDTH, &tile_width);
TIFFGetField(tiff, TIFFTAG_TILELENGTH, &tile_height);
swap_line_size = tile_width * sizeof(UINT32);
if (tile_width != swap_line_size / sizeof(UINT32)) {
return -1;
}
/* Read the tile into an RGBA array */
if (!TIFFReadRGBATile(tiff, col, row, buffer)) {
return -1;
}
swap_line = (UINT32*)malloc(swap_line_size);
if (swap_line == NULL) {
return -1;
}
/*
* For some reason the TIFFReadRGBATile() function chooses the
* lower left corner as the origin. Vertically mirror scanlines.
*/
for(i_row = 0; i_row < tile_height / 2; i_row++) {
UINT32 *top_line, *bottom_line;
top_line = buffer + tile_width * i_row;
bottom_line = buffer + tile_width * (tile_height - i_row - 1);
memcpy(swap_line, top_line, 4*tile_width);
memcpy(top_line, bottom_line, 4*tile_width);
memcpy(bottom_line, swap_line, 4*tile_width);
}
free(swap_line);
return 0;
}
if (TIFFReadTile(tiff, (tdata_t)buffer, col, row, 0, 0) == -1) {
TRACE(("Decode Error, Tile at %dx%d\n", col, row));
return -1;
}
TRACE(("Successfully read tile at %dx%d; \n\n", col, row));
return 0;
}
int ReadStrip(TIFF* tiff, UINT32 row, UINT32* buffer) {
uint16 photometric = 0; // init to not PHOTOMETRIC_YCBCR
TIFFGetField(tiff, TIFFTAG_PHOTOMETRIC, &photometric);
// To avoid dealing with YCbCr subsampling, let libtiff handle it
if (photometric == PHOTOMETRIC_YCBCR) {
TIFFRGBAImage img;
char emsg[1024] = "";
UINT32 rows_per_strip, rows_to_read;
int ok;
TIFFGetFieldDefaulted(tiff, TIFFTAG_ROWSPERSTRIP, &rows_per_strip);
if ((row % rows_per_strip) != 0) {
TRACE(("Row passed to ReadStrip() must be first in a strip."));
return -1;
}
if (TIFFRGBAImageOK(tiff, emsg) && TIFFRGBAImageBegin(&img, tiff, 0, emsg)) {
TRACE(("Initialized RGBAImage\n"));
img.req_orientation = ORIENTATION_TOPLEFT;
img.row_offset = row;
img.col_offset = 0;
rows_to_read = min(rows_per_strip, img.height - row);
TRACE(("rows to read: %d\n", rows_to_read));
ok = TIFFRGBAImageGet(&img, buffer, img.width, rows_to_read);
TIFFRGBAImageEnd(&img);
} else {
ok = 0;
}
if (ok == 0) {
TRACE(("Decode Error, row %d; msg: %s\n", row, emsg));
return -1;
}
return 0;
}
if (TIFFReadEncodedStrip(tiff, TIFFComputeStrip(tiff, row, 0), (tdata_t)buffer, -1) == -1) {
TRACE(("Decode Error, strip %d\n", TIFFComputeStrip(tiff, row, 0)));
return -1;
}
return 0;
}
int ImagingLibTiffDecode(Imaging im, ImagingCodecState state, UINT8* buffer, Py_ssize_t bytes) {
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
char *filename = "tempfile.tif";
char *mode = "r";
TIFF *tiff;
/* buffer is the encoded file, bytes is the length of the encoded file */
/* it all ends up in state->buffer, which is a uint8* from Imaging.h */
TRACE(("in decoder: bytes %d\n", bytes));
TRACE(("State: count %d, state %d, x %d, y %d, ystep %d\n", state->count, state->state,
state->x, state->y, state->ystep));
TRACE(("State: xsize %d, ysize %d, xoff %d, yoff %d \n", state->xsize, state->ysize,
state->xoff, state->yoff));
TRACE(("State: bits %d, bytes %d \n", state->bits, state->bytes));
TRACE(("Buffer: %p: %c%c%c%c\n", buffer, (char)buffer[0], (char)buffer[1],(char)buffer[2], (char)buffer[3]));
TRACE(("State->Buffer: %c%c%c%c\n", (char)state->buffer[0], (char)state->buffer[1],(char)state->buffer[2], (char)state->buffer[3]));
TRACE(("Image: mode %s, type %d, bands: %d, xsize %d, ysize %d \n",
im->mode, im->type, im->bands, im->xsize, im->ysize));
TRACE(("Image: image8 %p, image32 %p, image %p, block %p \n",
im->image8, im->image32, im->image, im->block));
TRACE(("Image: pixelsize: %d, linesize %d \n",
im->pixelsize, im->linesize));
dump_state(clientstate);
clientstate->size = bytes;
clientstate->eof = clientstate->size;
clientstate->loc = 0;
clientstate->data = (tdata_t)buffer;
clientstate->flrealloc = 0;
dump_state(clientstate);
TIFFSetWarningHandler(NULL);
TIFFSetWarningHandlerExt(NULL);
if (clientstate->fp) {
TRACE(("Opening using fd: %d\n",clientstate->fp));
lseek(clientstate->fp,0,SEEK_SET); // Sometimes, I get it set to the end.
tiff = TIFFFdOpen(clientstate->fp, filename, mode);
} else {
TRACE(("Opening from string\n"));
tiff = TIFFClientOpen(filename, mode,
(thandle_t) clientstate,
_tiffReadProc, _tiffWriteProc,
_tiffSeekProc, _tiffCloseProc, _tiffSizeProc,
_tiffMapProc, _tiffUnmapProc);
}
if (!tiff){
TRACE(("Error, didn't get the tiff\n"));
state->errcode = IMAGING_CODEC_BROKEN;
return -1;
}
if (clientstate->ifd){
int rv;
uint32 ifdoffset = clientstate->ifd;
TRACE(("reading tiff ifd %u\n", ifdoffset));
rv = TIFFSetSubDirectory(tiff, ifdoffset);
if (!rv){
TRACE(("error in TIFFSetSubDirectory"));
return -1;
}
}
if (TIFFIsTiled(tiff)) {
UINT32 x, y, tile_y, row_byte_size;
UINT32 tile_width, tile_length, current_tile_width;
UINT8 *new_data;
TIFFGetField(tiff, TIFFTAG_TILEWIDTH, &tile_width);
TIFFGetField(tiff, TIFFTAG_TILELENGTH, &tile_length);
// We could use TIFFTileSize, but for YCbCr data it returns subsampled data size
row_byte_size = (tile_width * state->bits + 7) / 8;
/* overflow check for realloc */
if (INT_MAX / row_byte_size < tile_length) {
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
return -1;
}
state->bytes = row_byte_size * tile_length;
if (TIFFTileSize(tiff) > state->bytes) {
// If the strip size as expected by LibTiff isn't what we're expecting, abort.
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
return -1;
}
/* realloc to fit whole tile */
/* malloc check above */
new_data = realloc (state->buffer, state->bytes);
if (!new_data) {
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
return -1;
}
state->buffer = new_data;
TRACE(("TIFFTileSize: %d\n", state->bytes));
for (y = state->yoff; y < state->ysize; y += tile_length) {
for (x = state->xoff; x < state->xsize; x += tile_width) {
if (ReadTile(tiff, x, y, (UINT32*) state->buffer) == -1) {
TRACE(("Decode Error, Tile at %dx%d\n", x, y));
state->errcode = IMAGING_CODEC_BROKEN;
TIFFClose(tiff);
return -1;
}
TRACE(("Read tile at %dx%d; \n\n", x, y));
current_tile_width = min(tile_width, state->xsize - x);
// iterate over each line in the tile and stuff data into image
for (tile_y = 0; tile_y < min(tile_length, state->ysize - y); tile_y++) {
TRACE(("Writing tile data at %dx%d using tile_width: %d; \n", tile_y + y, x, current_tile_width));
// UINT8 * bbb = state->buffer + tile_y * row_byte_size;
// TRACE(("chars: %x%x%x%x\n", ((UINT8 *)bbb)[0], ((UINT8 *)bbb)[1], ((UINT8 *)bbb)[2], ((UINT8 *)bbb)[3]));
state->shuffle((UINT8*) im->image[tile_y + y] + x * im->pixelsize,
state->buffer + tile_y * row_byte_size,
current_tile_width
);
}
}
}
} else {
UINT32 strip_row, row_byte_size;
UINT8 *new_data;
UINT32 rows_per_strip;
int ret;
ret = TIFFGetField(tiff, TIFFTAG_ROWSPERSTRIP, &rows_per_strip);
if (ret != 1) {
rows_per_strip = state->ysize;
}
TRACE(("RowsPerStrip: %u \n", rows_per_strip));
// We could use TIFFStripSize, but for YCbCr data it returns subsampled data size
row_byte_size = (state->xsize * state->bits + 7) / 8;
/* overflow check for realloc */
if (INT_MAX / row_byte_size < rows_per_strip) {
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
return -1;
}
state->bytes = rows_per_strip * row_byte_size;
TRACE(("StripSize: %d \n", state->bytes));
if (TIFFStripSize(tiff) > state->bytes) {
// If the strip size as expected by LibTiff isn't what we're expecting, abort.
// man: TIFFStripSize returns the equivalent size for a strip of data as it would be returned in a
// call to TIFFReadEncodedStrip ...
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
return -1;
}
/* realloc to fit whole strip */
/* malloc check above */
new_data = realloc (state->buffer, state->bytes);
if (!new_data) {
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
return -1;
}
state->buffer = new_data;
for (; state->y < state->ysize; state->y += rows_per_strip) {
if (ReadStrip(tiff, state->y, (UINT32 *)state->buffer) == -1) {
TRACE(("Decode Error, strip %d\n", TIFFComputeStrip(tiff, state->y, 0)));
state->errcode = IMAGING_CODEC_BROKEN;
TIFFClose(tiff);
return -1;
}
TRACE(("Decoded strip for row %d \n", state->y));
// iterate over each row in the strip and stuff data into image
for (strip_row = 0; strip_row < min(rows_per_strip, state->ysize - state->y); strip_row++) {
TRACE(("Writing data into line %d ; \n", state->y + strip_row));
// UINT8 * bbb = state->buffer + strip_row * (state->bytes / rows_per_strip);
// TRACE(("chars: %x %x %x %x\n", ((UINT8 *)bbb)[0], ((UINT8 *)bbb)[1], ((UINT8 *)bbb)[2], ((UINT8 *)bbb)[3]));
state->shuffle((UINT8*) im->image[state->y + state->yoff + strip_row] +
state->xoff * im->pixelsize,
state->buffer + strip_row * row_byte_size,
state->xsize);
}
}
}
TIFFClose(tiff);
TRACE(("Done Decoding, Returning \n"));
// Returning -1 here to force ImageFile.load to break, rather than
// even think about looping back around.
return -1;
}
int ImagingLibTiffEncodeInit(ImagingCodecState state, char *filename, int fp) {
// Open the FD or the pointer as a tiff file, for writing.
// We may have to do some monkeying around to make this really work.
// If we have a fp, then we're good.
// If we have a memory string, we're probably going to have to malloc, then
// shuffle bytes into the writescanline process.
// Going to have to deal with the directory as well.
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
int bufsize = 64*1024;
char *mode = "w";
TRACE(("initing libtiff\n"));
TRACE(("Filename %s, filepointer: %d \n", filename, fp));
TRACE(("State: count %d, state %d, x %d, y %d, ystep %d\n", state->count, state->state,
state->x, state->y, state->ystep));
TRACE(("State: xsize %d, ysize %d, xoff %d, yoff %d \n", state->xsize, state->ysize,
state->xoff, state->yoff));
TRACE(("State: bits %d, bytes %d \n", state->bits, state->bytes));
TRACE(("State: context %p \n", state->context));
clientstate->loc = 0;
clientstate->size = 0;
clientstate->eof =0;
clientstate->data = 0;
clientstate->flrealloc = 0;
clientstate->fp = fp;
state->state = 0;
if (fp) {
TRACE(("Opening using fd: %d for writing \n",clientstate->fp));
clientstate->tiff = TIFFFdOpen(clientstate->fp, filename, mode);
} else {
// malloc a buffer to write the tif, we're going to need to realloc or something if we need bigger.
TRACE(("Opening a buffer for writing \n"));
/* malloc check ok, small constant allocation */
clientstate->data = malloc(bufsize);
clientstate->size = bufsize;
clientstate->flrealloc=1;
if (!clientstate->data) {
TRACE(("Error, couldn't allocate a buffer of size %d\n", bufsize));
return 0;
}
clientstate->tiff = TIFFClientOpen(filename, mode,
(thandle_t) clientstate,
_tiffReadProc, _tiffWriteProc,
_tiffSeekProc, _tiffCloseProc, _tiffSizeProc,
_tiffNullMapProc, _tiffUnmapProc); /*force no mmap*/
}
if (!clientstate->tiff) {
TRACE(("Error, couldn't open tiff file\n"));
return 0;
}
return 1;
}
int ImagingLibTiffMergeFieldInfo(ImagingCodecState state, TIFFDataType field_type, int key, int is_var_length){
// Refer to libtiff docs (http://www.simplesystems.org/libtiff/addingtags.html)
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
char field_name[10];
uint32 n;
int status = 0;
// custom fields added with ImagingLibTiffMergeFieldInfo are only used for
// decoding, ignore readcount;
int readcount = 0;
// we support writing a single value, or a variable number of values
int writecount = 1;
// whether the first value should encode the number of values.
int passcount = 0;
TIFFFieldInfo info[] = {
{ key, readcount, writecount, field_type, FIELD_CUSTOM, 1, passcount, field_name }
};
if (is_var_length) {
info[0].field_writecount = -1;
}
if (is_var_length && field_type != TIFF_ASCII) {
info[0].field_passcount = 1;
}
n = sizeof(info) / sizeof(info[0]);
// Test for libtiff 4.0 or later, excluding libtiff 3.9.6 and 3.9.7
#if TIFFLIB_VERSION >= 20111221 && TIFFLIB_VERSION != 20120218 && TIFFLIB_VERSION != 20120922
status = TIFFMergeFieldInfo(clientstate->tiff, info, n);
#else
TIFFMergeFieldInfo(clientstate->tiff, info, n);
#endif
return status;
}
int ImagingLibTiffSetField(ImagingCodecState state, ttag_t tag, ...){
// after tif_dir.c->TIFFSetField.
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
va_list ap;
int status;
va_start(ap, tag);
status = TIFFVSetField(clientstate->tiff, tag, ap);
va_end(ap);
return status;
}
int ImagingLibTiffEncode(Imaging im, ImagingCodecState state, UINT8* buffer, int bytes) {
/* One shot encoder. Encode everything to the tiff in the clientstate.
If we're running off of a FD, then run once, we're good, everything
ends up in the file, we close and we're done.
If we're going to memory, then we need to write the whole file into memory, then
parcel it back out to the pystring buffer bytes at a time.
*/
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
TIFF *tiff = clientstate->tiff;
TRACE(("in encoder: bytes %d\n", bytes));
TRACE(("State: count %d, state %d, x %d, y %d, ystep %d\n", state->count, state->state,
state->x, state->y, state->ystep));
TRACE(("State: xsize %d, ysize %d, xoff %d, yoff %d \n", state->xsize, state->ysize,
state->xoff, state->yoff));
TRACE(("State: bits %d, bytes %d \n", state->bits, state->bytes));
TRACE(("Buffer: %p: %c%c%c%c\n", buffer, (char)buffer[0], (char)buffer[1],(char)buffer[2], (char)buffer[3]));
TRACE(("State->Buffer: %c%c%c%c\n", (char)state->buffer[0], (char)state->buffer[1],(char)state->buffer[2], (char)state->buffer[3]));
TRACE(("Image: mode %s, type %d, bands: %d, xsize %d, ysize %d \n",
im->mode, im->type, im->bands, im->xsize, im->ysize));
TRACE(("Image: image8 %p, image32 %p, image %p, block %p \n",
im->image8, im->image32, im->image, im->block));
TRACE(("Image: pixelsize: %d, linesize %d \n",
im->pixelsize, im->linesize));
dump_state(clientstate);
if (state->state == 0) {
TRACE(("Encoding line bt line"));
while(state->y < state->ysize){
state->shuffle(state->buffer,
(UINT8*) im->image[state->y + state->yoff] +
state->xoff * im->pixelsize,
state->xsize);
if (TIFFWriteScanline(tiff, (tdata_t)(state->buffer), (uint32)state->y, 0) == -1) {
TRACE(("Encode Error, row %d\n", state->y));
state->errcode = IMAGING_CODEC_BROKEN;
TIFFClose(tiff);
if (!clientstate->fp){
free(clientstate->data);
}
return -1;
}
state->y++;
}
if (state->y == state->ysize) {
state->state=1;
TRACE(("Flushing \n"));
if (!TIFFFlush(tiff)) {
TRACE(("Error flushing the tiff"));
// likely reason is memory.
state->errcode = IMAGING_CODEC_MEMORY;
TIFFClose(tiff);
if (!clientstate->fp){
free(clientstate->data);
}
return -1;
}
TRACE(("Closing \n"));
TIFFClose(tiff);
// reset the clientstate metadata to use it to read out the buffer.
clientstate->loc = 0;
clientstate->size = clientstate->eof; // redundant?
}
}
if (state->state == 1 && !clientstate->fp) {
int read = (int)_tiffReadProc(clientstate, (tdata_t)buffer, (tsize_t)bytes);
TRACE(("Buffer: %p: %c%c%c%c\n", buffer, (char)buffer[0], (char)buffer[1],(char)buffer[2], (char)buffer[3]));
if (clientstate->loc == clientstate->eof) {
TRACE(("Hit EOF, calling an end, freeing data"));
state->errcode = IMAGING_CODEC_END;
free(clientstate->data);
}
return read;
}
state->errcode = IMAGING_CODEC_END;
return 0;
}
const char*
ImagingTiffVersion(void)
{
return TIFFGetVersion();
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_3871_3 |
crossvul-cpp_data_good_3862_1 | /*
* Copyright (c) 2016 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <tc_util.h>
#include <mqtt_internal.h>
#include <sys/util.h> /* for ARRAY_SIZE */
#include <ztest.h>
#define CLIENTID MQTT_UTF8_LITERAL("zephyr")
#define TOPIC MQTT_UTF8_LITERAL("sensors")
#define WILL_TOPIC MQTT_UTF8_LITERAL("quitting")
#define WILL_MSG MQTT_UTF8_LITERAL("bye")
#define USERNAME MQTT_UTF8_LITERAL("zephyr1")
#define PASSWORD MQTT_UTF8_LITERAL("password")
#define BUFFER_SIZE 128
static ZTEST_DMEM u8_t rx_buffer[BUFFER_SIZE];
static ZTEST_DMEM u8_t tx_buffer[BUFFER_SIZE];
static ZTEST_DMEM struct mqtt_client client;
static ZTEST_DMEM struct mqtt_topic topic_qos_0 = {
.qos = 0,
.topic = TOPIC,
};
static ZTEST_DMEM struct mqtt_topic topic_qos_1 = {
.qos = 1,
.topic = TOPIC,
};
static ZTEST_DMEM struct mqtt_topic topic_qos_2 = {
.qos = 2,
.topic = TOPIC,
};
static ZTEST_DMEM struct mqtt_topic will_topic_qos_0 = {
.qos = 0,
.topic = WILL_TOPIC,
};
static ZTEST_DMEM struct mqtt_topic will_topic_qos_1 = {
.qos = 1,
.topic = WILL_TOPIC,
};
static ZTEST_DMEM struct mqtt_utf8 will_msg = WILL_MSG;
static ZTEST_DMEM struct mqtt_utf8 username = USERNAME;
static ZTEST_DMEM struct mqtt_utf8 password = PASSWORD;
/**
* @brief MQTT test structure
*/
struct mqtt_test {
/* test name, for example: "test connect 1" */
const char *test_name;
/* cast to something like:
* struct mqtt_publish_param *msg_publish =
* (struct mqtt_publish_param *)ctx
*/
void *ctx;
/* pointer to the eval routine, for example:
* eval_fcn = eval_msg_connect
*/
int (*eval_fcn)(struct mqtt_test *);
/* expected result */
u8_t *expected;
/* length of 'expected' */
u16_t expected_len;
};
/**
* @brief eval_msg_connect Evaluate the given mqtt_test against the
* connect packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_connect(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_publish Evaluate the given mqtt_test against the
* publish packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_publish(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_corrupted_publish Evaluate the given mqtt_test against the
* corrupted publish message.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_corrupted_publish(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_subscribe Evaluate the given mqtt_test against the
* subscribe packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_subscribe(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_suback Evaluate the given mqtt_test against the
* suback packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_suback(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_pingreq Evaluate the given mqtt_test against the
* pingreq packing/unpacking routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pingreq(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_puback Evaluate the given mqtt_test against the
* puback routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_puback(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_puback Evaluate the given mqtt_test against the
* pubcomp routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pubcomp(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_pubrec Evaluate the given mqtt_test against the
* pubrec routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pubrec(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_pubrel Evaluate the given mqtt_test against the
* pubrel routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_pubrel(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_unsuback Evaluate the given mqtt_test against the
* unsuback routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_unsuback(struct mqtt_test *mqtt_test);
/**
* @brief eval_msg_disconnect Evaluate the given mqtt_test against the
* disconnect routines.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_msg_disconnect(struct mqtt_test *mqtt_test);
/**
* @brief eval_max_pkt_len Evaluate header with maximum allowed packet
* length.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_max_pkt_len(struct mqtt_test *mqtt_test);
/**
* @brief eval_corrupted_pkt_len Evaluate header exceeding maximum
* allowed packet length.
* @param [in] mqtt_test MQTT test structure
* @return TC_PASS on success
* @return TC_FAIL on error
*/
static int eval_corrupted_pkt_len(struct mqtt_test *mqtt_test);
/**
* @brief eval_buffers Evaluate if two given buffers are equal
* @param [in] buf Input buffer 1, mostly used as the 'computed'
* buffer
* @param [in] expected Expected buffer
* @param [in] len 'expected' len
* @return TC_PASS on success
* @return TC_FAIL on error and prints both buffers
*/
static int eval_buffers(const struct buf_ctx *buf,
const u8_t *expected, u16_t len);
/**
* @brief print_array Prints the array 'a' of 'size' elements
* @param a The array
* @param size Array's size
*/
static void print_array(const u8_t *a, u16_t size);
/*
* MQTT CONNECT msg:
* Clean session: 1 Client id: [6] 'zephyr' Will flag: 0
* Will QoS: 0 Will retain: 0 Will topic: [0]
* Will msg: [0] Keep alive: 60 User name: [0]
* Password: [0]
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors
*/
static ZTEST_DMEM
u8_t connect1[] = {0x10, 0x12, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x02, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72};
static ZTEST_DMEM struct mqtt_client client_connect1 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 0, .will_topic = NULL,
.will_message = NULL, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Clean session: 1 Client id: [6] 'zephyr' Will flag: 1
* Will QoS: 0 Will retain: 0 Will topic: [8] quitting
* Will msg: [3] bye Keep alive: 0
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 0 --will-payload bye
*/
static ZTEST_DMEM
u8_t connect2[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x06, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect2 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 0, .will_topic = &will_topic_qos_0,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect3, but set Will retain: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 0 --will-payload bye --will-retain
*/
static ZTEST_DMEM
u8_t connect3[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x26, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect3 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 1, .will_topic = &will_topic_qos_0,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect3, but set Will QoS: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 1 --will-payload bye
*/
static ZTEST_DMEM
u8_t connect4[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x0e, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect4 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 0, .will_topic = &will_topic_qos_1,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect5, but set Will retain: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 1 --will-payload bye --will-retain
*/
static ZTEST_DMEM
u8_t connect5[] = {0x10, 0x21, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0x2e, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65};
static ZTEST_DMEM struct mqtt_client client_connect5 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 1, .will_topic = &will_topic_qos_1,
.will_message = &will_msg, .user_name = NULL,
.password = NULL
};
/*
* MQTT CONNECT msg:
* Same message as connect6, but set username: zephyr1 and password: password
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -k 60 -t sensors --will-topic quitting \
* --will-qos 1 --will-payload bye --will-retain -u zephyr1 -P password
*/
static ZTEST_DMEM
u8_t connect6[] = {0x10, 0x34, 0x00, 0x04, 0x4d, 0x51, 0x54, 0x54,
0x04, 0xee, 0x00, 0x3c, 0x00, 0x06, 0x7a, 0x65,
0x70, 0x68, 0x79, 0x72, 0x00, 0x08, 0x71, 0x75,
0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x00, 0x03,
0x62, 0x79, 0x65, 0x00, 0x07, 0x7a, 0x65, 0x70,
0x68, 0x79, 0x72, 0x31, 0x00, 0x08, 0x70, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64};
static ZTEST_DMEM struct mqtt_client client_connect6 = {
.clean_session = 1, .client_id = CLIENTID,
.will_retain = 1, .will_topic = &will_topic_qos_1,
.will_message = &will_msg, .user_name = &username,
.password = &password
};
static ZTEST_DMEM
u8_t disconnect1[] = {0xe0, 0x00};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 0, Retain: 0, topic: sensors, message: OK
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 0 -m "OK"
*/
static ZTEST_DMEM
u8_t publish1[] = {0x30, 0x0b, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish1 = {
.dup_flag = 0, .retain_flag = 0, .message_id = 0,
.message.topic.qos = 0,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 0, Retain: 1, topic: sensors, message: OK
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 0 -m "OK" -r
*/
static ZTEST_DMEM
u8_t publish2[] = {0x31, 0x0b, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish2 = {
.dup_flag = 0, .retain_flag = 1, .message_id = 0,
.message.topic.qos = 0,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 1, Retain: 1, topic: sensors, message: OK, pkt_id: 1
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 1 -m "OK" -r
*/
static ZTEST_DMEM
u8_t publish3[] = {0x33, 0x0d, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x00, 0x01, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish3 = {
.dup_flag = 0, .retain_flag = 1, .message_id = 1,
.message.topic.qos = 1,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
/*
* MQTT PUBLISH msg:
* DUP: 0, QoS: 2, Retain: 0, topic: sensors, message: OK, pkt_id: 1
*
* Message can be generated by the following command:
* mosquitto_pub -V mqttv311 -i zephyr -t sensors -q 2 -m "OK"
*/
static ZTEST_DMEM
u8_t publish4[] = {0x34, 0x0d, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x00, 0x01, 0x4f, 0x4b};
static ZTEST_DMEM struct mqtt_publish_param msg_publish4 = {
.dup_flag = 0, .retain_flag = 0, .message_id = 1,
.message.topic.qos = 2,
.message.topic.topic = TOPIC,
.message.payload.data = (u8_t *)"OK",
.message.payload.len = 2,
};
static ZTEST_DMEM
u8_t publish_corrupted[] = {0x30, 0x07, 0x00, 0x07, 0x73, 0x65, 0x6e, 0x73,
0x6f, 0x72, 0x73, 0x00, 0x01, 0x4f, 0x4b};
static ZTEST_DMEM struct buf_ctx publish_corrupted_buf = {
.cur = publish_corrupted,
.end = publish_corrupted + sizeof(publish_corrupted)
};
/*
* MQTT SUBSCRIBE msg:
* pkt_id: 1, topic: sensors, qos: 0
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 0
*/
static ZTEST_DMEM
u8_t subscribe1[] = {0x82, 0x0c, 0x00, 0x01, 0x00, 0x07, 0x73, 0x65,
0x6e, 0x73, 0x6f, 0x72, 0x73, 0x00};
static ZTEST_DMEM struct mqtt_subscription_list msg_subscribe1 = {
.message_id = 1, .list_count = 1, .list = &topic_qos_0
};
/*
* MQTT SUBSCRIBE msg:
* pkt_id: 1, topic: sensors, qos: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 1
*/
static ZTEST_DMEM
u8_t subscribe2[] = {0x82, 0x0c, 0x00, 0x01, 0x00, 0x07, 0x73, 0x65,
0x6e, 0x73, 0x6f, 0x72, 0x73, 0x01};
static ZTEST_DMEM struct mqtt_subscription_list msg_subscribe2 = {
.message_id = 1, .list_count = 1, .list = &topic_qos_1
};
/*
* MQTT SUBSCRIBE msg:
* pkt_id: 1, topic: sensors, qos: 2
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 2
*/
static ZTEST_DMEM
u8_t subscribe3[] = {0x82, 0x0c, 0x00, 0x01, 0x00, 0x07, 0x73, 0x65,
0x6e, 0x73, 0x6f, 0x72, 0x73, 0x02};
static ZTEST_DMEM struct mqtt_subscription_list msg_subscribe3 = {
.message_id = 1, .list_count = 1, .list = &topic_qos_2
};
/*
* MQTT SUBACK msg
* pkt_id: 1, qos: 0
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 0
*/
static ZTEST_DMEM
u8_t suback1[] = {0x90, 0x03, 0x00, 0x01, 0x00};
static ZTEST_DMEM u8_t data_suback1[] = { MQTT_SUBACK_SUCCESS_QoS_0 };
static ZTEST_DMEM struct mqtt_suback_param msg_suback1 = {
.message_id = 1, .return_codes.len = 1,
.return_codes.data = data_suback1
};
/*
* MQTT SUBACK message
* pkt_id: 1, qos: 1
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 1
*/
static ZTEST_DMEM
u8_t suback2[] = {0x90, 0x03, 0x00, 0x01, 0x01};
static ZTEST_DMEM u8_t data_suback2[] = { MQTT_SUBACK_SUCCESS_QoS_1 };
static ZTEST_DMEM struct mqtt_suback_param msg_suback2 = {
.message_id = 1, .return_codes.len = 1,
.return_codes.data = data_suback2
};
/*
* MQTT SUBACK message
* pkt_id: 1, qos: 2
*
* Message can be generated by the following command:
* mosquitto_sub -V mqttv311 -i zephyr -t sensors -q 2
*/
static ZTEST_DMEM
u8_t suback3[] = {0x90, 0x03, 0x00, 0x01, 0x02};
static ZTEST_DMEM u8_t data_suback3[] = { MQTT_SUBACK_SUCCESS_QoS_2 };
static ZTEST_DMEM struct mqtt_suback_param msg_suback3 = {
.message_id = 1, .return_codes.len = 1,
.return_codes.data = data_suback3
};
static ZTEST_DMEM
u8_t pingreq1[] = {0xc0, 0x00};
static ZTEST_DMEM
u8_t puback1[] = {0x40, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_puback_param msg_puback1 = {.message_id = 1};
static ZTEST_DMEM
u8_t pubrec1[] = {0x50, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_pubrec_param msg_pubrec1 = {.message_id = 1};
static ZTEST_DMEM
u8_t pubrel1[] = {0x62, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_pubrel_param msg_pubrel1 = {.message_id = 1};
static ZTEST_DMEM
u8_t pubcomp1[] = {0x70, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_pubcomp_param msg_pubcomp1 = {.message_id = 1};
static ZTEST_DMEM
u8_t unsuback1[] = {0xb0, 0x02, 0x00, 0x01};
static ZTEST_DMEM struct mqtt_unsuback_param msg_unsuback1 = {.message_id = 1};
static ZTEST_DMEM
u8_t max_pkt_len[] = {0x30, 0xff, 0xff, 0xff, 0x7f};
static ZTEST_DMEM struct buf_ctx max_pkt_len_buf = {
.cur = max_pkt_len, .end = max_pkt_len + sizeof(max_pkt_len)
};
static ZTEST_DMEM
u8_t corrupted_pkt_len[] = {0x30, 0xff, 0xff, 0xff, 0xff, 0x01};
static ZTEST_DMEM struct buf_ctx corrupted_pkt_len_buf = {
.cur = corrupted_pkt_len,
.end = corrupted_pkt_len + sizeof(corrupted_pkt_len)
};
static ZTEST_DMEM
struct mqtt_test mqtt_tests[] = {
{.test_name = "CONNECT, new session, zeros",
.ctx = &client_connect1, .eval_fcn = eval_msg_connect,
.expected = connect1, .expected_len = sizeof(connect1)},
{.test_name = "CONNECT, new session, will",
.ctx = &client_connect2, .eval_fcn = eval_msg_connect,
.expected = connect2, .expected_len = sizeof(connect2)},
{.test_name = "CONNECT, new session, will retain",
.ctx = &client_connect3, .eval_fcn = eval_msg_connect,
.expected = connect3, .expected_len = sizeof(connect3)},
{.test_name = "CONNECT, new session, will qos = 1",
.ctx = &client_connect4, .eval_fcn = eval_msg_connect,
.expected = connect4, .expected_len = sizeof(connect4)},
{.test_name = "CONNECT, new session, will qos = 1, will retain",
.ctx = &client_connect5, .eval_fcn = eval_msg_connect,
.expected = connect5, .expected_len = sizeof(connect5)},
{.test_name = "CONNECT, new session, username and password",
.ctx = &client_connect6, .eval_fcn = eval_msg_connect,
.expected = connect6, .expected_len = sizeof(connect6)},
{.test_name = "DISCONNECT",
.ctx = NULL, .eval_fcn = eval_msg_disconnect,
.expected = disconnect1, .expected_len = sizeof(disconnect1)},
{.test_name = "PUBLISH, qos = 0",
.ctx = &msg_publish1, .eval_fcn = eval_msg_publish,
.expected = publish1, .expected_len = sizeof(publish1)},
{.test_name = "PUBLISH, retain = 1",
.ctx = &msg_publish2, .eval_fcn = eval_msg_publish,
.expected = publish2, .expected_len = sizeof(publish2)},
{.test_name = "PUBLISH, retain = 1, qos = 1",
.ctx = &msg_publish3, .eval_fcn = eval_msg_publish,
.expected = publish3, .expected_len = sizeof(publish3)},
{.test_name = "PUBLISH, qos = 2",
.ctx = &msg_publish4, .eval_fcn = eval_msg_publish,
.expected = publish4, .expected_len = sizeof(publish4)},
{.test_name = "PUBLISH, corrupted message length (smaller than topic)",
.ctx = &publish_corrupted_buf, .eval_fcn = eval_msg_corrupted_publish},
{.test_name = "SUBSCRIBE, one topic, qos = 0",
.ctx = &msg_subscribe1, .eval_fcn = eval_msg_subscribe,
.expected = subscribe1, .expected_len = sizeof(subscribe1)},
{.test_name = "SUBSCRIBE, one topic, qos = 1",
.ctx = &msg_subscribe2, .eval_fcn = eval_msg_subscribe,
.expected = subscribe2, .expected_len = sizeof(subscribe2)},
{.test_name = "SUBSCRIBE, one topic, qos = 2",
.ctx = &msg_subscribe3, .eval_fcn = eval_msg_subscribe,
.expected = subscribe3, .expected_len = sizeof(subscribe3)},
{.test_name = "SUBACK, one topic, qos = 0",
.ctx = &msg_suback1, .eval_fcn = eval_msg_suback,
.expected = suback1, .expected_len = sizeof(suback1)},
{.test_name = "SUBACK, one topic, qos = 1",
.ctx = &msg_suback2, .eval_fcn = eval_msg_suback,
.expected = suback2, .expected_len = sizeof(suback2)},
{.test_name = "SUBACK, one topic, qos = 2",
.ctx = &msg_suback3, .eval_fcn = eval_msg_suback,
.expected = suback3, .expected_len = sizeof(suback3)},
{.test_name = "PINGREQ",
.ctx = NULL, .eval_fcn = eval_msg_pingreq,
.expected = pingreq1, .expected_len = sizeof(pingreq1)},
{.test_name = "PUBACK",
.ctx = &msg_puback1, .eval_fcn = eval_msg_puback,
.expected = puback1, .expected_len = sizeof(puback1)},
{.test_name = "PUBREC",
.ctx = &msg_pubrec1, .eval_fcn = eval_msg_pubrec,
.expected = pubrec1, .expected_len = sizeof(pubrec1)},
{.test_name = "PUBREL",
.ctx = &msg_pubrel1, .eval_fcn = eval_msg_pubrel,
.expected = pubrel1, .expected_len = sizeof(pubrel1)},
{.test_name = "PUBCOMP",
.ctx = &msg_pubcomp1, .eval_fcn = eval_msg_pubcomp,
.expected = pubcomp1, .expected_len = sizeof(pubcomp1)},
{.test_name = "UNSUBACK",
.ctx = &msg_unsuback1, .eval_fcn = eval_msg_unsuback,
.expected = unsuback1, .expected_len = sizeof(unsuback1)},
{.test_name = "Maximum packet length",
.ctx = &max_pkt_len_buf, .eval_fcn = eval_max_pkt_len},
{.test_name = "Corrupted packet length",
.ctx = &corrupted_pkt_len_buf, .eval_fcn = eval_corrupted_pkt_len},
/* last test case, do not remove it */
{.test_name = NULL}
};
static void print_array(const u8_t *a, u16_t size)
{
u16_t i;
TC_PRINT("\n");
for (i = 0U; i < size; i++) {
TC_PRINT("%x ", a[i]);
if ((i+1) % 8 == 0U) {
TC_PRINT("\n");
}
}
TC_PRINT("\n");
}
static
int eval_buffers(const struct buf_ctx *buf, const u8_t *expected, u16_t len)
{
if (buf->end - buf->cur != len) {
goto exit_eval;
}
if (memcmp(expected, buf->cur, buf->end - buf->cur) != 0) {
goto exit_eval;
}
return TC_PASS;
exit_eval:
TC_PRINT("FAIL\n");
TC_PRINT("Computed:");
print_array(buf->cur, buf->end - buf->cur);
TC_PRINT("Expected:");
print_array(expected, len);
return TC_FAIL;
}
static int eval_msg_connect(struct mqtt_test *mqtt_test)
{
struct mqtt_client *test_client;
int rc;
struct buf_ctx buf;
test_client = (struct mqtt_client *)mqtt_test->ctx;
client.clean_session = test_client->clean_session;
client.client_id = test_client->client_id;
client.will_topic = test_client->will_topic;
client.will_retain = test_client->will_retain;
client.will_message = test_client->will_message;
client.user_name = test_client->user_name;
client.password = test_client->password;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = connect_request_encode(&client, &buf);
/**TESTPOINTS: Check connect_request_encode functions*/
zassert_false(rc, "connect_request_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
return TC_PASS;
}
static int eval_msg_disconnect(struct mqtt_test *mqtt_test)
{
int rc;
struct buf_ctx buf;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = disconnect_encode(&buf);
/**TESTPOINTS: Check disconnect_encode functions*/
zassert_false(rc, "disconnect_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
return TC_PASS;
}
static int eval_msg_publish(struct mqtt_test *mqtt_test)
{
struct mqtt_publish_param *param =
(struct mqtt_publish_param *)mqtt_test->ctx;
struct mqtt_publish_param dec_param;
int rc;
u8_t type_and_flags;
u32_t length;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_encode(param, &buf);
/* Payload is not copied, copy it manually just after the header.*/
memcpy(buf.end, param->message.payload.data,
param->message.payload.len);
buf.end += param->message.payload.len;
/**TESTPOINT: Check publish_encode function*/
zassert_false(rc, "publish_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_decode(type_and_flags, length, &buf, &dec_param);
/**TESTPOINT: Check publish_decode function*/
zassert_false(rc, "publish_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"message_id error");
zassert_equal(dec_param.dup_flag, param->dup_flag,
"dup flag error");
zassert_equal(dec_param.retain_flag, param->retain_flag,
"retain flag error");
zassert_equal(dec_param.message.topic.qos, param->message.topic.qos,
"topic qos error");
zassert_equal(dec_param.message.topic.topic.size,
param->message.topic.topic.size,
"topic len error");
if (memcmp(dec_param.message.topic.topic.utf8,
param->message.topic.topic.utf8,
dec_param.message.topic.topic.size) != 0) {
zassert_unreachable("topic content error");
}
zassert_equal(dec_param.message.payload.len,
param->message.payload.len,
"payload len error");
return TC_PASS;
}
static int eval_msg_corrupted_publish(struct mqtt_test *mqtt_test)
{
struct buf_ctx *buf = (struct buf_ctx *)mqtt_test->ctx;
int rc;
u8_t type_and_flags;
u32_t length;
struct mqtt_publish_param dec_param;
rc = fixed_header_decode(buf, &type_and_flags, &length);
zassert_equal(rc, 0, "fixed_header_decode failed");
rc = publish_decode(type_and_flags, length, buf, &dec_param);
zassert_equal(rc, -EINVAL, "publish_decode should fail");
return TC_PASS;
}
static int eval_msg_subscribe(struct mqtt_test *mqtt_test)
{
struct mqtt_subscription_list *param =
(struct mqtt_subscription_list *)mqtt_test->ctx;
int rc;
struct buf_ctx buf;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = subscribe_encode(param, &buf);
/**TESTPOINT: Check subscribe_encode function*/
zassert_false(rc, "subscribe_encode failed");
return eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
}
static int eval_msg_suback(struct mqtt_test *mqtt_test)
{
struct mqtt_suback_param *param =
(struct mqtt_suback_param *)mqtt_test->ctx;
struct mqtt_suback_param dec_param;
int rc;
u8_t type_and_flags;
u32_t length;
struct buf_ctx buf;
buf.cur = mqtt_test->expected;
buf.end = mqtt_test->expected + mqtt_test->expected_len;
memset(&dec_param, 0, sizeof(dec_param));
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = subscribe_ack_decode(&buf, &dec_param);
/**TESTPOINT: Check subscribe_ack_decode function*/
zassert_false(rc, "subscribe_ack_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
zassert_equal(dec_param.return_codes.len,
param->return_codes.len,
"topic count error");
if (memcmp(dec_param.return_codes.data, param->return_codes.data,
dec_param.return_codes.len) != 0) {
zassert_unreachable("subscribe result error");
}
return TC_PASS;
}
static int eval_msg_pingreq(struct mqtt_test *mqtt_test)
{
int rc;
struct buf_ctx buf;
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = ping_request_encode(&buf);
/**TESTPOINTS: Check eval_msg_pingreq functions*/
zassert_false(rc, "ping_request_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
return TC_PASS;
}
static int eval_msg_puback(struct mqtt_test *mqtt_test)
{
struct mqtt_puback_param *param =
(struct mqtt_puback_param *)mqtt_test->ctx;
struct mqtt_puback_param dec_param;
int rc;
u8_t type_and_flags;
u32_t length;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_ack_encode(param, &buf);
/**TESTPOINTS: Check publish_ack_encode functions*/
zassert_false(rc, "publish_ack_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_ack_decode(&buf, &dec_param);
zassert_false(rc, "publish_ack_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_pubcomp(struct mqtt_test *mqtt_test)
{
struct mqtt_pubcomp_param *param =
(struct mqtt_pubcomp_param *)mqtt_test->ctx;
struct mqtt_pubcomp_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_complete_encode(param, &buf);
/**TESTPOINTS: Check publish_complete_encode functions*/
zassert_false(rc, "publish_complete_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_complete_decode(&buf, &dec_param);
zassert_false(rc, "publish_complete_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_pubrec(struct mqtt_test *mqtt_test)
{
struct mqtt_pubrec_param *param =
(struct mqtt_pubrec_param *)mqtt_test->ctx;
struct mqtt_pubrec_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_receive_encode(param, &buf);
/**TESTPOINTS: Check publish_receive_encode functions*/
zassert_false(rc, "publish_receive_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_receive_decode(&buf, &dec_param);
zassert_false(rc, "publish_receive_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_pubrel(struct mqtt_test *mqtt_test)
{
struct mqtt_pubrel_param *param =
(struct mqtt_pubrel_param *)mqtt_test->ctx;
struct mqtt_pubrel_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = client.tx_buf;
buf.end = client.tx_buf + client.tx_buf_size;
rc = publish_release_encode(param, &buf);
/**TESTPOINTS: Check publish_release_encode functions*/
zassert_false(rc, "publish_release_encode failed");
rc = eval_buffers(&buf, mqtt_test->expected, mqtt_test->expected_len);
zassert_false(rc, "eval_buffers failed");
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = publish_release_decode(&buf, &dec_param);
zassert_false(rc, "publish_release_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_msg_unsuback(struct mqtt_test *mqtt_test)
{
struct mqtt_unsuback_param *param =
(struct mqtt_unsuback_param *)mqtt_test->ctx;
struct mqtt_unsuback_param dec_param;
int rc;
u32_t length;
u8_t type_and_flags;
struct buf_ctx buf;
memset(&dec_param, 0, sizeof(dec_param));
buf.cur = mqtt_test->expected;
buf.end = mqtt_test->expected + mqtt_test->expected_len;
rc = fixed_header_decode(&buf, &type_and_flags, &length);
zassert_false(rc, "fixed_header_decode failed");
rc = unsubscribe_ack_decode(&buf, &dec_param);
zassert_false(rc, "unsubscribe_ack_decode failed");
zassert_equal(dec_param.message_id, param->message_id,
"packet identifier error");
return TC_PASS;
}
static int eval_max_pkt_len(struct mqtt_test *mqtt_test)
{
struct buf_ctx *buf = (struct buf_ctx *)mqtt_test->ctx;
int rc;
u8_t flags;
u32_t length;
rc = fixed_header_decode(buf, &flags, &length);
zassert_equal(rc, 0, "fixed_header_decode failed");
zassert_equal(length, MQTT_MAX_PAYLOAD_SIZE,
"Invalid packet length decoded");
return TC_PASS;
}
static int eval_corrupted_pkt_len(struct mqtt_test *mqtt_test)
{
struct buf_ctx *buf = (struct buf_ctx *)mqtt_test->ctx;
int rc;
u8_t flags;
u32_t length;
rc = fixed_header_decode(buf, &flags, &length);
zassert_equal(rc, -EINVAL, "fixed_header_decode should fail");
return TC_PASS;
}
void test_mqtt_packet(void)
{
TC_START("MQTT Library test");
int rc;
int i;
mqtt_client_init(&client);
client.protocol_version = MQTT_VERSION_3_1_1;
client.rx_buf = rx_buffer;
client.rx_buf_size = sizeof(rx_buffer);
client.tx_buf = tx_buffer;
client.tx_buf_size = sizeof(tx_buffer);
i = 0;
do {
struct mqtt_test *test = &mqtt_tests[i];
if (test->test_name == NULL) {
break;
}
rc = test->eval_fcn(test);
TC_PRINT("[%s] %d - %s\n", TC_RESULT_TO_STR(rc), i + 1,
test->test_name);
/**TESTPOINT: Check eval_fcn*/
zassert_false(rc, "mqtt_packet test error");
i++;
} while (1);
mqtt_abort(&client);
}
void test_main(void)
{
ztest_test_suite(test_mqtt_packet_fn,
ztest_user_unit_test(test_mqtt_packet));
ztest_run_test_suite(test_mqtt_packet_fn);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_3862_1 |
crossvul-cpp_data_bad_4697_1 | /*
* irc-nick.c - nick management for IRC plugin
*
* Copyright (C) 2003-2020 Sébastien Helleu <flashcode@flashtux.org>
*
* This file is part of WeeChat, the extensible chat client.
*
* WeeChat is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* WeeChat is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with WeeChat. If not, see <https://www.gnu.org/licenses/>.
*/
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include "../weechat-plugin.h"
#include "irc.h"
#include "irc-nick.h"
#include "irc-color.h"
#include "irc-config.h"
#include "irc-mode.h"
#include "irc-server.h"
#include "irc-channel.h"
/*
* Checks if a nick pointer is valid.
*
* Returns:
* 1: nick exists in channel
* 0: nick does not exist in channel
*/
int
irc_nick_valid (struct t_irc_channel *channel, struct t_irc_nick *nick)
{
struct t_irc_nick *ptr_nick;
if (!channel || !nick)
return 0;
for (ptr_nick = channel->nicks; ptr_nick; ptr_nick = ptr_nick->next_nick)
{
if (ptr_nick == nick)
return 1;
}
/* nick not found */
return 0;
}
/*
* Checks if string is a valid nick string (RFC 1459).
*
* Returns:
* 1: string is a valid nick
* 0: string is not a valid nick
*/
int
irc_nick_is_nick (const char *string)
{
const char *ptr;
if (!string || !string[0])
return 0;
/* first char must not be a number or hyphen */
ptr = string;
if (strchr ("0123456789-", *ptr))
return 0;
while (ptr && ptr[0])
{
if (!strchr (IRC_NICK_VALID_CHARS, *ptr))
return 0;
ptr++;
}
return 1;
}
/*
* Finds a color code for a nick (according to nick letters).
*
* Returns a WeeChat color code (that can be used for display).
*/
char *
irc_nick_find_color (const char *nickname)
{
return weechat_info_get ("nick_color", nickname);
}
/*
* Finds a color name for a nick (according to nick letters).
*
* Returns the name of a color (for example: "green").
*/
char *
irc_nick_find_color_name (const char *nickname)
{
return weechat_info_get ("nick_color_name", nickname);
}
/*
* Sets current prefix, using higher prefix set in prefixes.
*/
void
irc_nick_set_current_prefix (struct t_irc_nick *nick)
{
char *ptr_prefixes;
if (!nick)
return;
nick->prefix[0] = ' ';
for (ptr_prefixes = nick->prefixes; ptr_prefixes[0]; ptr_prefixes++)
{
if (ptr_prefixes[0] != ' ')
{
nick->prefix[0] = ptr_prefixes[0];
break;
}
}
}
/*
* Sets/unsets a prefix in prefixes.
*
* If set == 1, sets prefix (prefix is used).
* If set == 0, unsets prefix (space is used).
*/
void
irc_nick_set_prefix (struct t_irc_server *server, struct t_irc_nick *nick,
int set, char prefix)
{
int index;
if (!nick)
return;
index = irc_server_get_prefix_char_index (server, prefix);
if (index >= 0)
{
nick->prefixes[index] = (set) ? prefix : ' ';
irc_nick_set_current_prefix (nick);
}
}
/*
* Sets prefixes for nick.
*/
void
irc_nick_set_prefixes (struct t_irc_server *server, struct t_irc_nick *nick,
const char *prefixes)
{
const char *ptr_prefixes;
if (!nick)
return;
/* reset all prefixes in nick */
memset (nick->prefixes, ' ', strlen (nick->prefixes));
/* add prefixes in nick */
if (prefixes)
{
for (ptr_prefixes = prefixes; ptr_prefixes[0]; ptr_prefixes++)
{
irc_nick_set_prefix (server, nick, 1, ptr_prefixes[0]);
}
}
/* set current prefix */
irc_nick_set_current_prefix (nick);
}
/*
* Sets host for nick.
*/
void
irc_nick_set_host (struct t_irc_nick *nick, const char *host)
{
if (!nick)
return;
/* if host is the same, just return */
if ((!nick->host && !host)
|| (nick->host && host && strcmp (nick->host, host) == 0))
{
return;
}
/* update the host in nick */
if (nick->host)
free (nick->host);
nick->host = (host) ? strdup (host) : NULL;
}
/*
* Checks if nick is "op" (or better than "op", for example channel admin or
* channel owner).
*
* Returns:
* 1: nick is "op" (or better)
* 0: nick is not op
*/
int
irc_nick_is_op (struct t_irc_server *server, struct t_irc_nick *nick)
{
int index;
if (nick->prefix[0] == ' ')
return 0;
index = irc_server_get_prefix_char_index (server, nick->prefix[0]);
if (index < 0)
return 0;
return (index <= irc_server_get_prefix_mode_index (server, 'o')) ? 1 : 0;
}
/*
* Checks if nick prefixes contains prefix for a given mode.
*
* For example if prefix_mode is 'o', searches for '@' in nick prefixes.
*
* Returns:
* 1: prefixes contains prefix for the given mode
* 0: prefixes does not contain prefix for the given mode.
*/
int
irc_nick_has_prefix_mode (struct t_irc_server *server, struct t_irc_nick *nick,
char prefix_mode)
{
char prefix_char;
prefix_char = irc_server_get_prefix_char_for_mode (server, prefix_mode);
if (prefix_char == ' ')
return 0;
return (strchr (nick->prefixes, prefix_char)) ? 1 : 0;
}
/*
* Gets nicklist group for a nick.
*/
struct t_gui_nick_group *
irc_nick_get_nicklist_group (struct t_irc_server *server,
struct t_gui_buffer *buffer,
struct t_irc_nick *nick)
{
int index;
char str_group[2];
const char *prefix_modes;
struct t_gui_nick_group *ptr_group;
if (!server || !buffer || !nick)
return NULL;
ptr_group = NULL;
index = irc_server_get_prefix_char_index (server, nick->prefix[0]);
if (index < 0)
{
ptr_group = weechat_nicklist_search_group (buffer, NULL,
IRC_NICK_GROUP_OTHER_NAME);
}
else
{
prefix_modes = irc_server_get_prefix_modes (server);
str_group[0] = prefix_modes[index];
str_group[1] = '\0';
ptr_group = weechat_nicklist_search_group (buffer, NULL, str_group);
}
return ptr_group;
}
/*
* Gets name of prefix color for a nick.
*/
const char *
irc_nick_get_prefix_color_name (struct t_irc_server *server, char prefix)
{
static char *default_color = "";
const char *prefix_modes, *color;
char mode[2];
int index;
if (irc_config_hashtable_nick_prefixes)
{
mode[0] = ' ';
mode[1] = '\0';
index = irc_server_get_prefix_char_index (server, prefix);
if (index >= 0)
{
prefix_modes = irc_server_get_prefix_modes (server);
mode[0] = prefix_modes[index];
color = weechat_hashtable_get (irc_config_hashtable_nick_prefixes,
mode);
if (color)
return color;
}
/* fallback to "*" if no color is found with mode */
mode[0] = '*';
color = weechat_hashtable_get (irc_config_hashtable_nick_prefixes,
mode);
if (color)
return color;
}
/* no color by default */
return default_color;
}
/*
* Gets nick color for nicklist.
*/
char *
irc_nick_get_color_for_nicklist (struct t_irc_server *server,
struct t_irc_nick *nick)
{
static char *nick_color_bar_fg = "bar_fg";
static char *nick_color_self = "weechat.color.chat_nick_self";
static char *nick_color_away = "weechat.color.nicklist_away";
if (nick->away)
return strdup (nick_color_away);
if (weechat_config_boolean (irc_config_look_color_nicks_in_nicklist))
{
if (irc_server_strcasecmp (server, nick->name, server->nick) == 0)
return strdup (nick_color_self);
else
return irc_nick_find_color_name (nick->name);
}
return strdup (nick_color_bar_fg);
}
/*
* Adds a nick to buffer nicklist.
*/
void
irc_nick_nicklist_add (struct t_irc_server *server,
struct t_irc_channel *channel,
struct t_irc_nick *nick)
{
struct t_gui_nick_group *ptr_group;
char *color;
ptr_group = irc_nick_get_nicklist_group (server, channel->buffer, nick);
color = irc_nick_get_color_for_nicklist (server, nick);
weechat_nicklist_add_nick (channel->buffer, ptr_group,
nick->name,
color,
nick->prefix,
irc_nick_get_prefix_color_name (server, nick->prefix[0]),
1);
if (color)
free (color);
}
/*
* Removes a nick from buffer nicklist.
*/
void
irc_nick_nicklist_remove (struct t_irc_server *server,
struct t_irc_channel *channel,
struct t_irc_nick *nick)
{
struct t_gui_nick_group *ptr_group;
ptr_group = irc_nick_get_nicklist_group (server, channel->buffer, nick);
weechat_nicklist_remove_nick (channel->buffer,
weechat_nicklist_search_nick (channel->buffer,
ptr_group,
nick->name));
}
/*
* Sets a property for nick in buffer nicklist.
*/
void
irc_nick_nicklist_set (struct t_irc_channel *channel,
struct t_irc_nick *nick,
const char *property, const char *value)
{
struct t_gui_nick *ptr_nick;
ptr_nick = weechat_nicklist_search_nick (channel->buffer, NULL, nick->name);
if (ptr_nick)
{
weechat_nicklist_nick_set (channel->buffer, ptr_nick, property, value);
}
}
/*
* Sets nick prefix colors in nicklist for all servers/channels.
*/
void
irc_nick_nicklist_set_prefix_color_all ()
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
struct t_irc_nick *ptr_nick;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
for (ptr_nick = ptr_channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
irc_nick_nicklist_set (ptr_channel, ptr_nick, "prefix_color",
irc_nick_get_prefix_color_name (ptr_server,
ptr_nick->prefix[0]));
}
}
}
}
/*
* Sets nick colors in nicklist for all servers/channels.
*/
void
irc_nick_nicklist_set_color_all ()
{
struct t_irc_server *ptr_server;
struct t_irc_channel *ptr_channel;
struct t_irc_nick *ptr_nick;
char *color;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
for (ptr_channel = ptr_server->channels; ptr_channel;
ptr_channel = ptr_channel->next_channel)
{
for (ptr_nick = ptr_channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
color = irc_nick_get_color_for_nicklist (ptr_server, ptr_nick);
irc_nick_nicklist_set (ptr_channel, ptr_nick, "color", color);
if (color)
free (color);
}
}
}
}
/*
* Adds a new nick in channel.
*
* Returns pointer to new nick, NULL if error.
*/
struct t_irc_nick *
irc_nick_new (struct t_irc_server *server, struct t_irc_channel *channel,
const char *nickname, const char *host, const char *prefixes,
int away, const char *account, const char *realname)
{
struct t_irc_nick *new_nick, *ptr_nick;
int length;
if (!nickname || !nickname[0])
return NULL;
if (!channel->nicks)
irc_channel_add_nicklist_groups (server, channel);
/* nick already exists on this channel? */
ptr_nick = irc_nick_search (server, channel, nickname);
if (ptr_nick)
{
/* remove old nick from nicklist */
irc_nick_nicklist_remove (server, channel, ptr_nick);
/* update nick prefixes */
irc_nick_set_prefixes (server, ptr_nick, prefixes);
/* add new nick in nicklist */
irc_nick_nicklist_add (server, channel, ptr_nick);
return ptr_nick;
}
/* alloc memory for new nick */
if ((new_nick = malloc (sizeof (*new_nick))) == NULL)
return NULL;
/* initialize new nick */
new_nick->name = strdup (nickname);
new_nick->host = (host) ? strdup (host) : NULL;
new_nick->account = (account) ? strdup (account) : NULL;
new_nick->realname = (realname) ? strdup (realname) : NULL;
length = strlen (irc_server_get_prefix_chars (server));
new_nick->prefixes = malloc (length + 1);
new_nick->prefix = malloc (2);
if (!new_nick->name || !new_nick->prefixes || !new_nick->prefix)
{
if (new_nick->name)
free (new_nick->name);
if (new_nick->host)
free (new_nick->host);
if (new_nick->account)
free (new_nick->account);
if (new_nick->realname)
free (new_nick->realname);
if (new_nick->prefixes)
free (new_nick->prefixes);
if (new_nick->prefix)
free (new_nick->prefix);
free (new_nick);
return NULL;
}
memset (new_nick->prefixes, ' ', length);
new_nick->prefixes[length] = '\0';
new_nick->prefix[0] = ' ';
new_nick->prefix[1] = '\0';
irc_nick_set_prefixes (server, new_nick, prefixes);
new_nick->away = away;
if (irc_server_strcasecmp (server, new_nick->name, server->nick) == 0)
new_nick->color = strdup (IRC_COLOR_CHAT_NICK_SELF);
else
new_nick->color = irc_nick_find_color (new_nick->name);
/* add nick to end of list */
new_nick->prev_nick = channel->last_nick;
if (channel->last_nick)
channel->last_nick->next_nick = new_nick;
else
channel->nicks = new_nick;
channel->last_nick = new_nick;
new_nick->next_nick = NULL;
channel->nicks_count++;
channel->nick_completion_reset = 1;
/* add nick to buffer nicklist */
irc_nick_nicklist_add (server, channel, new_nick);
/* all is OK, return address of new nick */
return new_nick;
}
/*
* Changes nickname.
*/
void
irc_nick_change (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick, const char *new_nick)
{
int nick_is_me;
/* remove nick from nicklist */
irc_nick_nicklist_remove (server, channel, nick);
/* update nicks speaking */
nick_is_me = (irc_server_strcasecmp (server, new_nick, server->nick) == 0) ? 1 : 0;
if (!nick_is_me)
irc_channel_nick_speaking_rename (channel, nick->name, new_nick);
/* change nickname */
if (nick->name)
free (nick->name);
nick->name = strdup (new_nick);
if (nick->color)
free (nick->color);
if (nick_is_me)
nick->color = strdup (IRC_COLOR_CHAT_NICK_SELF);
else
nick->color = irc_nick_find_color (nick->name);
/* add nick in nicklist */
irc_nick_nicklist_add (server, channel, nick);
}
/*
* Sets a mode for a nick.
*/
void
irc_nick_set_mode (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick, int set, char mode)
{
int index;
const char *prefix_chars;
index = irc_server_get_prefix_mode_index (server, mode);
if (index < 0)
return;
/* remove nick from nicklist */
irc_nick_nicklist_remove (server, channel, nick);
/* set flag */
prefix_chars = irc_server_get_prefix_chars (server);
irc_nick_set_prefix (server, nick, set, prefix_chars[index]);
/* add nick in nicklist */
irc_nick_nicklist_add (server, channel, nick);
if (irc_server_strcasecmp (server, nick->name, server->nick) == 0)
{
weechat_bar_item_update ("input_prompt");
weechat_bar_item_update ("irc_nick");
weechat_bar_item_update ("irc_nick_host");
}
}
/*
* Removes a nick from a channel.
*/
void
irc_nick_free (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick)
{
struct t_irc_nick *new_nicks;
if (!channel || !nick)
return;
/* remove nick from nicklist */
irc_nick_nicklist_remove (server, channel, nick);
/* remove nick */
if (channel->last_nick == nick)
channel->last_nick = nick->prev_nick;
if (nick->prev_nick)
{
(nick->prev_nick)->next_nick = nick->next_nick;
new_nicks = channel->nicks;
}
else
new_nicks = nick->next_nick;
if (nick->next_nick)
(nick->next_nick)->prev_nick = nick->prev_nick;
channel->nicks_count--;
/* free data */
if (nick->name)
free (nick->name);
if (nick->host)
free (nick->host);
if (nick->prefixes)
free (nick->prefixes);
if (nick->prefix)
free (nick->prefix);
if (nick->account)
free (nick->account);
if (nick->realname)
free (nick->realname);
if (nick->color)
free (nick->color);
free (nick);
channel->nicks = new_nicks;
channel->nick_completion_reset = 1;
}
/*
* Removes all nicks from a channel.
*/
void
irc_nick_free_all (struct t_irc_server *server, struct t_irc_channel *channel)
{
if (!channel)
return;
/* remove all nicks for the channel */
while (channel->nicks)
{
irc_nick_free (server, channel, channel->nicks);
}
/* remove all groups in nicklist */
weechat_nicklist_remove_all (channel->buffer);
/* should be zero, but prevent any bug :D */
channel->nicks_count = 0;
}
/*
* Searches for a nick in a channel.
*
* Returns pointer to nick found, NULL if error.
*/
struct t_irc_nick *
irc_nick_search (struct t_irc_server *server, struct t_irc_channel *channel,
const char *nickname)
{
struct t_irc_nick *ptr_nick;
if (!channel || !nickname)
return NULL;
for (ptr_nick = channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
if (irc_server_strcasecmp (server, ptr_nick->name, nickname) == 0)
return ptr_nick;
}
/* nick not found */
return NULL;
}
/*
* Returns number of nicks (total, op, halfop, voice, normal) on a channel.
*/
void
irc_nick_count (struct t_irc_server *server, struct t_irc_channel *channel,
int *total, int *count_op, int *count_halfop, int *count_voice,
int *count_normal)
{
struct t_irc_nick *ptr_nick;
(*total) = 0;
(*count_op) = 0;
(*count_halfop) = 0;
(*count_voice) = 0;
(*count_normal) = 0;
for (ptr_nick = channel->nicks; ptr_nick;
ptr_nick = ptr_nick->next_nick)
{
(*total)++;
if (irc_nick_is_op (server, ptr_nick))
(*count_op)++;
else
{
if (irc_nick_has_prefix_mode (server, ptr_nick, 'h'))
(*count_halfop)++;
else
{
if (irc_nick_has_prefix_mode (server, ptr_nick, 'v'))
(*count_voice)++;
else
(*count_normal)++;
}
}
}
}
/*
* Sets/unsets away status for a nick.
*/
void
irc_nick_set_away (struct t_irc_server *server, struct t_irc_channel *channel,
struct t_irc_nick *nick, int is_away)
{
char *color;
if (is_away != nick->away)
{
nick->away = is_away;
color = irc_nick_get_color_for_nicklist (server, nick);
irc_nick_nicklist_set (channel, nick, "color", color);
if (color)
free (color);
}
}
/*
* Gets nick mode for display (color + mode).
*
* If prefix == 1, returns string for display in prefix, otherwise returns
* string for display in action message (/me).
*/
const char *
irc_nick_mode_for_display (struct t_irc_server *server, struct t_irc_nick *nick,
int prefix)
{
static char result[32];
char str_prefix[2];
int nick_mode;
const char *str_prefix_color;
str_prefix[0] = (nick) ? nick->prefix[0] : '\0';
str_prefix[1] = '\0';
nick_mode = weechat_config_integer (irc_config_look_nick_mode);
if ((nick_mode == IRC_CONFIG_LOOK_NICK_MODE_BOTH)
|| (prefix && (nick_mode == IRC_CONFIG_LOOK_NICK_MODE_PREFIX))
|| (!prefix && (nick_mode == IRC_CONFIG_LOOK_NICK_MODE_ACTION)))
{
if (nick)
{
if ((str_prefix[0] == ' ')
&& (!prefix || !weechat_config_boolean (irc_config_look_nick_mode_empty)))
{
str_prefix[0] = '\0';
}
str_prefix_color = weechat_color (
irc_nick_get_prefix_color_name (server, nick->prefix[0]));
}
else
{
str_prefix[0] = (prefix
&& weechat_config_boolean (irc_config_look_nick_mode_empty)) ?
' ' : '\0';
str_prefix_color = IRC_COLOR_RESET;
}
}
else
{
str_prefix[0] = '\0';
str_prefix_color = IRC_COLOR_RESET;
}
snprintf (result, sizeof (result), "%s%s", str_prefix_color, str_prefix);
return result;
}
/*
* Returns string with nick to display as prefix on buffer (returned string ends
* by a tab).
*/
const char *
irc_nick_as_prefix (struct t_irc_server *server, struct t_irc_nick *nick,
const char *nickname, const char *force_color)
{
static char result[256];
char *color;
if (force_color)
color = strdup (force_color);
else if (nick)
color = strdup (nick->color);
else if (nickname)
color = irc_nick_find_color (nickname);
else
color = strdup (IRC_COLOR_CHAT_NICK);
snprintf (result, sizeof (result), "%s%s%s\t",
irc_nick_mode_for_display (server, nick, 1),
color,
(nick) ? nick->name : nickname);
if (color)
free (color);
return result;
}
/*
* Returns WeeChat color code for a nick.
*/
const char *
irc_nick_color_for_msg (struct t_irc_server *server, int server_message,
struct t_irc_nick *nick, const char *nickname)
{
static char color[16][64];
static int index_color = 0;
char *color_found;
if (server_message
&& !weechat_config_boolean (irc_config_look_color_nicks_in_server_messages))
{
return IRC_COLOR_CHAT_NICK;
}
if (nick)
return nick->color;
if (nickname)
{
if (server
&& (irc_server_strcasecmp (server, nickname, server->nick) == 0))
{
return IRC_COLOR_CHAT_NICK_SELF;
}
color_found = irc_nick_find_color (nickname);
index_color = (index_color + 1) % 16;
snprintf (color[index_color], sizeof (color[index_color]),
"%s",
color_found);
if (color_found)
free (color_found);
return color[index_color];
}
return IRC_COLOR_CHAT_NICK;
}
/*
* Returns string with color of nick for private.
*/
const char *
irc_nick_color_for_pv (struct t_irc_channel *channel, const char *nickname)
{
if (weechat_config_boolean (irc_config_look_color_pv_nick_like_channel))
{
if (!channel->pv_remote_nick_color)
channel->pv_remote_nick_color = irc_nick_find_color (nickname);
if (channel->pv_remote_nick_color)
return channel->pv_remote_nick_color;
}
return IRC_COLOR_CHAT_NICK_OTHER;
}
/*
* Returns default ban mask for the nick.
*
* Note: result must be freed after use (if not NULL).
*/
char *
irc_nick_default_ban_mask (struct t_irc_nick *nick)
{
const char *ptr_ban_mask;
char *pos_hostname, user[128], ident[128], *res, *temp;
if (!nick)
return NULL;
ptr_ban_mask = weechat_config_string (irc_config_network_ban_mask_default);
pos_hostname = (nick->host) ? strchr (nick->host, '@') : NULL;
if (!nick->host || !pos_hostname || !ptr_ban_mask || !ptr_ban_mask[0])
return NULL;
if (pos_hostname - nick->host > (int)sizeof (user) - 1)
return NULL;
strncpy (user, nick->host, pos_hostname - nick->host);
user[pos_hostname - nick->host] = '\0';
strcpy (ident, (user[0] != '~') ? user : "*");
pos_hostname++;
/* replace nick */
temp = weechat_string_replace (ptr_ban_mask, "$nick", nick->name);
if (!temp)
return NULL;
res = temp;
/* replace user */
temp = weechat_string_replace (res, "$user", user);
free (res);
if (!temp)
return NULL;
res = temp;
/* replace ident */
temp = weechat_string_replace (res, "$ident", ident);
free (res);
if (!temp)
return NULL;
res = temp;
/* replace hostname */
temp = weechat_string_replace (res, "$host", pos_hostname);
free (res);
if (!temp)
return NULL;
res = temp;
return res;
}
/*
* Returns hdata for nick.
*/
struct t_hdata *
irc_nick_hdata_nick_cb (const void *pointer, void *data,
const char *hdata_name)
{
struct t_hdata *hdata;
/* make C compiler happy */
(void) pointer;
(void) data;
hdata = weechat_hdata_new (hdata_name, "prev_nick", "next_nick",
0, 0, NULL, NULL);
if (hdata)
{
WEECHAT_HDATA_VAR(struct t_irc_nick, name, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, host, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, prefixes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, prefix, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, away, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, account, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, realname, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, color, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_nick, prev_nick, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_VAR(struct t_irc_nick, next_nick, POINTER, 0, NULL, hdata_name);
}
return hdata;
}
/*
* Adds a nick in an infolist.
*
* Returns:
* 1: OK
* 0: error
*/
int
irc_nick_add_to_infolist (struct t_infolist *infolist,
struct t_irc_nick *nick)
{
struct t_infolist_item *ptr_item;
if (!infolist || !nick)
return 0;
ptr_item = weechat_infolist_new_item (infolist);
if (!ptr_item)
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "name", nick->name))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "host", nick->host))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefixes", nick->prefixes))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "prefix", nick->prefix))
return 0;
if (!weechat_infolist_new_var_integer (ptr_item, "away", nick->away))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "account", nick->account))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "realname", nick->realname))
return 0;
if (!weechat_infolist_new_var_string (ptr_item, "color", nick->color))
return 0;
return 1;
}
/*
* Prints nick infos in WeeChat log file (usually for crash dump).
*/
void
irc_nick_print_log (struct t_irc_nick *nick)
{
weechat_log_printf ("");
weechat_log_printf (" => nick %s (addr:0x%lx):", nick->name, nick);
weechat_log_printf (" host . . . . . : '%s'", nick->host);
weechat_log_printf (" prefixes . . . : '%s'", nick->prefixes);
weechat_log_printf (" prefix . . . . : '%s'", nick->prefix);
weechat_log_printf (" away . . . . . : %d", nick->away);
weechat_log_printf (" account. . . . : '%s'", nick->account);
weechat_log_printf (" realname . . . : '%s'", nick->realname);
weechat_log_printf (" color. . . . . : '%s'", nick->color);
weechat_log_printf (" prev_nick. . . : 0x%lx", nick->prev_nick);
weechat_log_printf (" next_nick. . . : 0x%lx", nick->next_nick);
}
| ./CrossVul/dataset_final_sorted/CWE-120/c/bad_4697_1 |
crossvul-cpp_data_good_1199_0 | /* FriBidi
* fribidi-bidi.c - bidirectional algorithm
*
* Authors:
* Behdad Esfahbod, 2001, 2002, 2004
* Dov Grobgeld, 1999, 2000, 2017
*
* Copyright (C) 2004 Sharif FarsiWeb, Inc
* Copyright (C) 2001,2002 Behdad Esfahbod
* Copyright (C) 1999,2000,2017 Dov Grobgeld
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library, in a file named COPYING; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA
*
* For licensing issues, contact <fribidi.license@gmail.com>.
*/
#include "common.h"
#include <fribidi-bidi.h>
#include <fribidi-mirroring.h>
#include <fribidi-brackets.h>
#include <fribidi-unicode.h>
#include "bidi-types.h"
#include "run.h"
/*
* This file implements most of Unicode Standard Annex #9, Tracking Number 13.
*/
#ifndef MAX
# define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif /* !MAX */
/* Some convenience macros */
#define RL_TYPE(list) ((list)->type)
#define RL_LEN(list) ((list)->len)
#define RL_LEVEL(list) ((list)->level)
/* "Within this scope, bidirectional types EN and AN are treated as R" */
#define RL_TYPE_AN_EN_AS_RTL(list) ( \
(((list)->type == FRIBIDI_TYPE_AN) || ((list)->type == FRIBIDI_TYPE_EN) | ((list)->type == FRIBIDI_TYPE_RTL)) ? FRIBIDI_TYPE_RTL : (list)->type)
#define RL_BRACKET_TYPE(list) ((list)->bracket_type)
#define RL_ISOLATE_LEVEL(list) ((list)->isolate_level)
#define LOCAL_BRACKET_SIZE 16
/* Pairing nodes are used for holding a pair of open/close brackets as
described in BD16. */
struct _FriBidiPairingNodeStruct {
FriBidiRun *open;
FriBidiRun *close;
struct _FriBidiPairingNodeStruct *next;
};
typedef struct _FriBidiPairingNodeStruct FriBidiPairingNode;
static FriBidiRun *
merge_with_prev (
FriBidiRun *second
)
{
FriBidiRun *first;
fribidi_assert (second);
fribidi_assert (second->next);
first = second->prev;
fribidi_assert (first);
first->next = second->next;
first->next->prev = first;
RL_LEN (first) += RL_LEN (second);
if (second->next_isolate)
second->next_isolate->prev_isolate = first;
first->next_isolate = second->next_isolate;
fribidi_free (second);
return first;
}
static void
compact_list (
FriBidiRun *list
)
{
fribidi_assert (list);
if (list->next)
for_run_list (list, list)
if (RL_TYPE (list->prev) == RL_TYPE (list)
&& RL_LEVEL (list->prev) == RL_LEVEL (list)
&& RL_BRACKET_TYPE(list) == FRIBIDI_NO_BRACKET /* Don't join brackets! */
&& RL_BRACKET_TYPE(list->prev) == FRIBIDI_NO_BRACKET
)
list = merge_with_prev (list);
}
static void
compact_neutrals (
FriBidiRun *list
)
{
fribidi_assert (list);
if (list->next)
{
for_run_list (list, list)
{
if (RL_LEVEL (list->prev) == RL_LEVEL (list)
&&
((RL_TYPE (list->prev) == RL_TYPE (list)
|| (FRIBIDI_IS_NEUTRAL (RL_TYPE (list->prev))
&& FRIBIDI_IS_NEUTRAL (RL_TYPE (list)))))
&& RL_BRACKET_TYPE(list) == FRIBIDI_NO_BRACKET /* Don't join brackets! */
&& RL_BRACKET_TYPE(list->prev) == FRIBIDI_NO_BRACKET
)
list = merge_with_prev (list);
}
}
}
/* Search for an adjacent run in the forward or backward direction.
It uses the next_isolate and prev_isolate run for short circuited searching.
*/
/* The static sentinel is used to signal the end of an isolating
sequence */
static FriBidiRun sentinel = { NULL, NULL, 0,0, FRIBIDI_TYPE_SENTINEL, -1,-1,FRIBIDI_NO_BRACKET, NULL, NULL };
static FriBidiRun *get_adjacent_run(FriBidiRun *list, fribidi_boolean forward, fribidi_boolean skip_neutral)
{
FriBidiRun *ppp = forward ? list->next_isolate : list->prev_isolate;
if (!ppp)
return &sentinel;
while (ppp)
{
FriBidiCharType ppp_type = RL_TYPE (ppp);
if (ppp_type == FRIBIDI_TYPE_SENTINEL)
break;
/* Note that when sweeping forward we continue one run
beyond the PDI to see what lies behind. When looking
backwards, this is not necessary as the leading isolate
run has already been assigned the resolved level. */
if (ppp->isolate_level > list->isolate_level /* <- How can this be true? */
|| (forward && ppp_type == FRIBIDI_TYPE_PDI)
|| (skip_neutral && !FRIBIDI_IS_STRONG(ppp_type)))
{
ppp = forward ? ppp->next_isolate : ppp->prev_isolate;
if (!ppp)
ppp = &sentinel;
continue;
}
break;
}
return ppp;
}
#ifdef DEBUG
/*======================================================================
* For debugging, define some functions for printing the types and the
* levels.
*----------------------------------------------------------------------*/
static char char_from_level_array[] = {
'$', /* -1 == FRIBIDI_SENTINEL, indicating
* start or end of string. */
/* 0-61 == 0-9,a-z,A-Z are the the only valid levels before resolving
* implicits. after that the level @ may be appear too. */
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D',
'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z',
/* TBD - insert another 125-64 levels */
'@', /* 62 == only must appear after resolving
* implicits. */
'!', /* 63 == FRIBIDI_LEVEL_INVALID, internal error,
* this level shouldn't be seen. */
'*', '*', '*', '*', '*' /* >= 64 == overflows, this levels and higher
* levels show a real bug!. */
};
#define fribidi_char_from_level(level) char_from_level_array[(level) + 1]
static void
print_types_re (
const FriBidiRun *pp
)
{
fribidi_assert (pp);
MSG (" Run types : ");
for_run_list (pp, pp)
{
MSG6 ("%d:%d(%s)[%d,%d] ",
pp->pos, pp->len, fribidi_get_bidi_type_name (pp->type), pp->level, pp->isolate_level);
}
MSG ("\n");
}
static void
print_resolved_levels (
const FriBidiRun *pp
)
{
fribidi_assert (pp);
MSG (" Res. levels: ");
for_run_list (pp, pp)
{
register FriBidiStrIndex i;
for (i = RL_LEN (pp); i; i--)
MSG2 ("%c", fribidi_char_from_level (RL_LEVEL (pp)));
}
MSG ("\n");
}
static void
print_resolved_types (
const FriBidiRun *pp
)
{
fribidi_assert (pp);
MSG (" Res. types : ");
for_run_list (pp, pp)
{
FriBidiStrIndex i;
for (i = RL_LEN (pp); i; i--)
MSG2 ("%s ", fribidi_get_bidi_type_name (pp->type));
}
MSG ("\n");
}
static void
print_bidi_string (
/* input */
const FriBidiCharType *bidi_types,
const FriBidiStrIndex len
)
{
register FriBidiStrIndex i;
fribidi_assert (bidi_types);
MSG (" Org. types : ");
for (i = 0; i < len; i++)
MSG2 ("%s ", fribidi_get_bidi_type_name (bidi_types[i]));
MSG ("\n");
}
static void print_pairing_nodes(FriBidiPairingNode *nodes)
{
MSG ("Pairs: ");
while (nodes)
{
MSG3 ("(%d, %d) ", nodes->open->pos, nodes->close->pos);
nodes = nodes->next;
}
MSG ("\n");
}
#endif /* DEBUG */
/*=========================================================================
* define macros for push and pop the status in to / out of the stack
*-------------------------------------------------------------------------*/
/* There are a few little points in pushing into and popping from the status
stack:
1. when the embedding level is not valid (more than
FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL=125), you must reject it, and not to push
into the stack, but when you see a PDF, you must find the matching code,
and if it was pushed in the stack, pop it, it means you must pop if and
only if you have pushed the matching code, the over_pushed var counts the
number of rejected codes so far.
2. there's a more confusing point too, when the embedding level is exactly
FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL-1=124, an LRO, LRE, or LRI is rejected
because the new level would be FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL+1=126, that
is invalid; but an RLO, RLE, or RLI is accepted because the new level is
FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL=125, that is valid, so the rejected codes
may be not continuous in the logical order, in fact there are at most two
continuous intervals of codes, with an RLO, RLE, or RLI between them. To
support this case, the first_interval var counts the number of rejected
codes in the first interval, when it is 0, means that there is only one
interval.
*/
/* a. If this new level would be valid, then this embedding code is valid.
Remember (push) the current embedding level and override status.
Reset current level to this new level, and reset the override status to
new_override.
b. If the new level would not be valid, then this code is invalid. Don't
change the current level or override status.
*/
#define PUSH_STATUS \
FRIBIDI_BEGIN_STMT \
if LIKELY(over_pushed == 0 \
&& isolate_overflow == 0 \
&& new_level <= FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL) \
{ \
if UNLIKELY(level == FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL - 1) \
first_interval = over_pushed; \
status_stack[stack_size].level = level; \
status_stack[stack_size].isolate_level = isolate_level; \
status_stack[stack_size].isolate = isolate; \
status_stack[stack_size].override = override; \
stack_size++; \
level = new_level; \
override = new_override; \
} else if LIKELY(isolate_overflow == 0) \
over_pushed++; \
FRIBIDI_END_STMT
/* If there was a valid matching code, restore (pop) the last remembered
(pushed) embedding level and directional override.
*/
#define POP_STATUS \
FRIBIDI_BEGIN_STMT \
if (stack_size) \
{ \
if UNLIKELY(over_pushed > first_interval) \
over_pushed--; \
else \
{ \
if LIKELY(over_pushed == first_interval) \
first_interval = 0; \
stack_size--; \
level = status_stack[stack_size].level; \
override = status_stack[stack_size].override; \
isolate = status_stack[stack_size].isolate; \
isolate_level = status_stack[stack_size].isolate_level; \
} \
} \
FRIBIDI_END_STMT
/* Return the type of previous run or the SOR, if already at the start of
a level run. */
#define PREV_TYPE_OR_SOR(pp) \
( \
RL_LEVEL(pp->prev) == RL_LEVEL(pp) ? \
RL_TYPE(pp->prev) : \
FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(pp->prev), RL_LEVEL(pp))) \
)
/* Return the type of next run or the EOR, if already at the end of
a level run. */
#define NEXT_TYPE_OR_EOR(pp) \
( \
RL_LEVEL(pp->next) == RL_LEVEL(pp) ? \
RL_TYPE(pp->next) : \
FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(pp->next), RL_LEVEL(pp))) \
)
/* Return the embedding direction of a link. */
#define FRIBIDI_EMBEDDING_DIRECTION(link) \
FRIBIDI_LEVEL_TO_DIR(RL_LEVEL(link))
FRIBIDI_ENTRY FriBidiParType
fribidi_get_par_direction (
/* input */
const FriBidiCharType *bidi_types,
const FriBidiStrIndex len
)
{
register FriBidiStrIndex i;
fribidi_assert (bidi_types);
for (i = 0; i < len; i++)
if (FRIBIDI_IS_LETTER (bidi_types[i]))
return FRIBIDI_IS_RTL (bidi_types[i]) ? FRIBIDI_PAR_RTL :
FRIBIDI_PAR_LTR;
return FRIBIDI_PAR_ON;
}
/* Push a new entry to the pairing linked list */
static FriBidiPairingNode * pairing_nodes_push(FriBidiPairingNode *nodes,
FriBidiRun *open,
FriBidiRun *close)
{
FriBidiPairingNode *node = fribidi_malloc(sizeof(FriBidiPairingNode));
node->open = open;
node->close = close;
node->next = nodes;
nodes = node;
return nodes;
}
/* Sort by merge sort */
static void pairing_nodes_front_back_split(FriBidiPairingNode *source,
/* output */
FriBidiPairingNode **front,
FriBidiPairingNode **back)
{
FriBidiPairingNode *pfast, *pslow;
if (!source || !source->next)
{
*front = source;
*back = NULL;
}
else
{
pslow = source;
pfast = source->next;
while (pfast)
{
pfast= pfast->next;
if (pfast)
{
pfast = pfast->next;
pslow = pslow->next;
}
}
*front = source;
*back = pslow->next;
pslow->next = NULL;
}
}
static FriBidiPairingNode *
pairing_nodes_sorted_merge(FriBidiPairingNode *nodes1,
FriBidiPairingNode *nodes2)
{
FriBidiPairingNode *res = NULL;
if (!nodes1)
return nodes2;
if (!nodes2)
return nodes1;
if (nodes1->open->pos < nodes2->open->pos)
{
res = nodes1;
res->next = pairing_nodes_sorted_merge(nodes1->next, nodes2);
}
else
{
res = nodes2;
res->next = pairing_nodes_sorted_merge(nodes1, nodes2->next);
}
return res;
}
static void sort_pairing_nodes(FriBidiPairingNode **nodes)
{
FriBidiPairingNode *front, *back;
/* 0 or 1 node case */
if (!*nodes || !(*nodes)->next)
return;
pairing_nodes_front_back_split(*nodes, &front, &back);
sort_pairing_nodes(&front);
sort_pairing_nodes(&back);
*nodes = pairing_nodes_sorted_merge(front, back);
}
static void free_pairing_nodes(FriBidiPairingNode *nodes)
{
while (nodes)
{
FriBidiPairingNode *p = nodes;
nodes = nodes->next;
fribidi_free(p);
}
}
FRIBIDI_ENTRY FriBidiLevel
fribidi_get_par_embedding_levels_ex (
/* input */
const FriBidiCharType *bidi_types,
const FriBidiBracketType *bracket_types,
const FriBidiStrIndex len,
/* input and output */
FriBidiParType *pbase_dir,
/* output */
FriBidiLevel *embedding_levels
)
{
FriBidiLevel base_level_per_iso_level[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL];
FriBidiLevel base_level, max_level = 0;
FriBidiParType base_dir;
FriBidiRun *main_run_list = NULL, *explicits_list = NULL, *pp;
fribidi_boolean status = false;
int max_iso_level = 0;
if UNLIKELY
(!len)
{
status = true;
goto out;
}
DBG ("in fribidi_get_par_embedding_levels");
fribidi_assert (bidi_types);
fribidi_assert (pbase_dir);
fribidi_assert (embedding_levels);
/* Determinate character types */
{
/* Get run-length encoded character types */
main_run_list = run_list_encode_bidi_types (bidi_types, bracket_types, len);
if UNLIKELY
(!main_run_list) goto out;
}
/* Find base level */
/* If no strong base_dir was found, resort to the weak direction
that was passed on input. */
base_level = FRIBIDI_DIR_TO_LEVEL (*pbase_dir);
if (!FRIBIDI_IS_STRONG (*pbase_dir))
/* P2. P3. Search for first strong character and use its direction as
base direction */
{
int valid_isolate_count = 0;
for_run_list (pp, main_run_list)
{
if (RL_TYPE(pp) == FRIBIDI_TYPE_PDI)
{
/* Ignore if there is no matching isolate */
if (valid_isolate_count>0)
valid_isolate_count--;
}
else if (FRIBIDI_IS_ISOLATE(RL_TYPE(pp)))
valid_isolate_count++;
else if (valid_isolate_count==0 && FRIBIDI_IS_LETTER (RL_TYPE (pp)))
{
base_level = FRIBIDI_DIR_TO_LEVEL (RL_TYPE (pp));
*pbase_dir = FRIBIDI_LEVEL_TO_DIR (base_level);
break;
}
}
}
base_dir = FRIBIDI_LEVEL_TO_DIR (base_level);
DBG2 (" base level : %c", fribidi_char_from_level (base_level));
DBG2 (" base dir : %s", fribidi_get_bidi_type_name (base_dir));
base_level_per_iso_level[0] = base_level;
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
}
# endif /* DEBUG */
/* Explicit Levels and Directions */
DBG ("explicit levels and directions");
{
FriBidiLevel level, new_level = 0;
int isolate_level = 0;
FriBidiCharType override, new_override;
FriBidiStrIndex i;
int stack_size, over_pushed, first_interval;
int valid_isolate_count = 0;
int isolate_overflow = 0;
int isolate = 0; /* The isolate status flag */
struct
{
FriBidiCharType override; /* only LTR, RTL and ON are valid */
FriBidiLevel level;
int isolate;
int isolate_level;
} status_stack[FRIBIDI_BIDI_MAX_RESOLVED_LEVELS];
FriBidiRun temp_link;
FriBidiRun *run_per_isolate_level[FRIBIDI_BIDI_MAX_RESOLVED_LEVELS];
memset(run_per_isolate_level, 0, sizeof(run_per_isolate_level[0])
* FRIBIDI_BIDI_MAX_RESOLVED_LEVELS);
/* explicits_list is a list like main_run_list, that holds the explicit
codes that are removed from main_run_list, to reinsert them later by
calling the shadow_run_list.
*/
explicits_list = new_run_list ();
if UNLIKELY
(!explicits_list) goto out;
/* X1. Begin by setting the current embedding level to the paragraph
embedding level. Set the directional override status to neutral,
and directional isolate status to false.
Process each character iteratively, applying rules X2 through X8.
Only embedding levels from 0 to 123 are valid in this phase. */
level = base_level;
override = FRIBIDI_TYPE_ON;
/* stack */
stack_size = 0;
over_pushed = 0;
first_interval = 0;
valid_isolate_count = 0;
isolate_overflow = 0;
for_run_list (pp, main_run_list)
{
FriBidiCharType this_type = RL_TYPE (pp);
RL_ISOLATE_LEVEL (pp) = isolate_level;
if (FRIBIDI_IS_EXPLICIT_OR_BN (this_type))
{
if (FRIBIDI_IS_STRONG (this_type))
{ /* LRE, RLE, LRO, RLO */
/* 1. Explicit Embeddings */
/* X2. With each RLE, compute the least greater odd
embedding level. */
/* X3. With each LRE, compute the least greater even
embedding level. */
/* 2. Explicit Overrides */
/* X4. With each RLO, compute the least greater odd
embedding level. */
/* X5. With each LRO, compute the least greater even
embedding level. */
new_override = FRIBIDI_EXPLICIT_TO_OVERRIDE_DIR (this_type);
for (i = RL_LEN (pp); i; i--)
{
new_level =
((level + FRIBIDI_DIR_TO_LEVEL (this_type) + 2) & ~1) -
FRIBIDI_DIR_TO_LEVEL (this_type);
isolate = 0;
PUSH_STATUS;
}
}
else if (this_type == FRIBIDI_TYPE_PDF)
{
/* 3. Terminating Embeddings and overrides */
/* X7. With each PDF, determine the matching embedding or
override code. */
for (i = RL_LEN (pp); i; i--)
{
if (stack_size && status_stack[stack_size-1].isolate != 0)
break;
POP_STATUS;
}
}
/* X9. Remove all RLE, LRE, RLO, LRO, PDF, and BN codes. */
/* Remove element and add it to explicits_list */
RL_LEVEL (pp) = FRIBIDI_SENTINEL;
temp_link.next = pp->next;
move_node_before (pp, explicits_list);
pp = &temp_link;
}
else if (this_type == FRIBIDI_TYPE_PDI)
/* X6a. pop the direction of the stack */
{
for (i = RL_LEN (pp); i; i--)
{
if (isolate_overflow > 0)
{
isolate_overflow--;
RL_LEVEL (pp) = level;
}
else if (valid_isolate_count > 0)
{
/* Pop away all LRE,RLE,LRO, RLO levels
from the stack, as these are implicitly
terminated by the PDI */
while (stack_size && !status_stack[stack_size-1].isolate)
POP_STATUS;
over_pushed = 0; /* The PDI resets the overpushed! */
POP_STATUS;
isolate_level-- ;
valid_isolate_count--;
RL_LEVEL (pp) = level;
RL_ISOLATE_LEVEL (pp) = isolate_level;
}
else
{
/* Ignore isolated PDI's by turning them into ON's */
RL_TYPE (pp) = FRIBIDI_TYPE_ON;
RL_LEVEL (pp) = level;
}
}
}
else if (FRIBIDI_IS_ISOLATE(this_type))
{
/* TBD support RL_LEN > 1 */
new_override = FRIBIDI_TYPE_ON;
isolate = 1;
if (this_type == FRIBIDI_TYPE_LRI)
new_level = level + 2 - (level%2);
else if (this_type == FRIBIDI_TYPE_RLI)
new_level = level + 1 + (level%2);
else if (this_type == FRIBIDI_TYPE_FSI)
{
/* Search for a local strong character until we
meet the corresponding PDI or the end of the
paragraph */
FriBidiRun *fsi_pp;
int isolate_count = 0;
int fsi_base_level = 0;
for_run_list (fsi_pp, pp)
{
if (RL_TYPE(fsi_pp) == FRIBIDI_TYPE_PDI)
{
isolate_count--;
if (valid_isolate_count < 0)
break;
}
else if (FRIBIDI_IS_ISOLATE(RL_TYPE(fsi_pp)))
isolate_count++;
else if (isolate_count==0 && FRIBIDI_IS_LETTER (RL_TYPE (fsi_pp)))
{
fsi_base_level = FRIBIDI_DIR_TO_LEVEL (RL_TYPE (fsi_pp));
break;
}
}
/* Same behavior like RLI and LRI above */
if (FRIBIDI_LEVEL_IS_RTL (fsi_base_level))
new_level = level + 1 + (level%2);
else
new_level = level + 2 - (level%2);
}
RL_LEVEL (pp) = level;
RL_ISOLATE_LEVEL (pp) = isolate_level;
if (isolate_level < FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL-1)
isolate_level++;
base_level_per_iso_level[isolate_level] = new_level;
if (!FRIBIDI_IS_NEUTRAL (override))
RL_TYPE (pp) = override;
if (new_level <= FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL)
{
valid_isolate_count++;
PUSH_STATUS;
level = new_level;
}
else
isolate_overflow += 1;
}
else if (this_type == FRIBIDI_TYPE_BS)
{
/* X8. All explicit directional embeddings and overrides are
completely terminated at the end of each paragraph. Paragraph
separators are not included in the embedding. */
break;
}
else
{
/* X6. For all types besides RLE, LRE, RLO, LRO, and PDF:
a. Set the level of the current character to the current
embedding level.
b. Whenever the directional override status is not neutral,
reset the current character type to the directional override
status. */
RL_LEVEL (pp) = level;
if (!FRIBIDI_IS_NEUTRAL (override))
RL_TYPE (pp) = override;
}
}
/* Build the isolate_level connections */
for_run_list (pp, main_run_list)
{
int isolate_level = RL_ISOLATE_LEVEL (pp);
if (run_per_isolate_level[isolate_level])
{
run_per_isolate_level[isolate_level]->next_isolate = pp;
pp->prev_isolate = run_per_isolate_level[isolate_level];
}
run_per_isolate_level[isolate_level] = pp;
}
/* Implementing X8. It has no effect on a single paragraph! */
level = base_level;
override = FRIBIDI_TYPE_ON;
stack_size = 0;
over_pushed = 0;
}
/* X10. The remaining rules are applied to each run of characters at the
same level. For each run, determine the start-of-level-run (sor) and
end-of-level-run (eor) type, either L or R. This depends on the
higher of the two levels on either side of the boundary (at the start
or end of the paragraph, the level of the 'other' run is the base
embedding level). If the higher level is odd, the type is R, otherwise
it is L. */
/* Resolving Implicit Levels can be done out of X10 loop, so only change
of Resolving Weak Types and Resolving Neutral Types is needed. */
compact_list (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
print_bidi_string (bidi_types, len);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* 4. Resolving weak types. Also calculate the maximum isolate level */
max_iso_level = 0;
DBG ("resolving weak types");
{
int last_strong_stack[FRIBIDI_BIDI_MAX_RESOLVED_LEVELS];
FriBidiCharType prev_type_orig;
fribidi_boolean w4;
last_strong_stack[0] = base_dir;
for_run_list (pp, main_run_list)
{
register FriBidiCharType prev_type, this_type, next_type;
FriBidiRun *ppp_prev, *ppp_next;
int iso_level;
ppp_prev = get_adjacent_run(pp, false, false);
ppp_next = get_adjacent_run(pp, true, false);
this_type = RL_TYPE (pp);
iso_level = RL_ISOLATE_LEVEL(pp);
if (iso_level > max_iso_level)
max_iso_level = iso_level;
if (RL_LEVEL(ppp_prev) == RL_LEVEL(pp))
prev_type = RL_TYPE(ppp_prev);
else
prev_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_prev), RL_LEVEL(pp)));
if (RL_LEVEL(ppp_next) == RL_LEVEL(pp))
next_type = RL_TYPE(ppp_next);
else
next_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_next), RL_LEVEL(pp)));
if (FRIBIDI_IS_STRONG (prev_type))
last_strong_stack[iso_level] = prev_type;
/* W1. NSM
Examine each non-spacing mark (NSM) in the level run, and change the
type of the NSM to the type of the previous character. If the NSM
is at the start of the level run, it will get the type of sor. */
/* Implementation note: it is important that if the previous character
is not sor, then we should merge this run with the previous,
because of rules like W5, that we assume all of a sequence of
adjacent ETs are in one FriBidiRun. */
if (this_type == FRIBIDI_TYPE_NSM)
{
/* New rule in Unicode 6.3 */
if (FRIBIDI_IS_ISOLATE (RL_TYPE (pp->prev)))
RL_TYPE(pp) = FRIBIDI_TYPE_ON;
if (RL_LEVEL (ppp_prev) == RL_LEVEL (pp))
{
if (ppp_prev == pp->prev)
pp = merge_with_prev (pp);
}
else
RL_TYPE (pp) = prev_type;
if (prev_type == next_type && RL_LEVEL (pp) == RL_LEVEL (pp->next))
{
if (ppp_next == pp->next)
pp = merge_with_prev (pp->next);
}
continue; /* As we know the next condition cannot be true. */
}
/* W2: European numbers. */
if (this_type == FRIBIDI_TYPE_EN && last_strong_stack[iso_level] == FRIBIDI_TYPE_AL)
{
RL_TYPE (pp) = FRIBIDI_TYPE_AN;
/* Resolving dependency of loops for rules W1 and W2, so we
can merge them in one loop. */
if (next_type == FRIBIDI_TYPE_NSM)
RL_TYPE (ppp_next) = FRIBIDI_TYPE_AN;
}
}
last_strong_stack[0] = base_dir;
/* Resolving dependency of loops for rules W4 and W5, W5 may
want to prevent W4 to take effect in the next turn, do this
through "w4". */
w4 = true;
/* Resolving dependency of loops for rules W4 and W5 with W7,
W7 may change an EN to L but it sets the prev_type_orig if needed,
so W4 and W5 in next turn can still do their works. */
prev_type_orig = FRIBIDI_TYPE_ON;
/* Each isolate level has its own memory of the last strong character */
for_run_list (pp, main_run_list)
{
register FriBidiCharType prev_type, this_type, next_type;
int iso_level;
FriBidiRun *ppp_prev, *ppp_next;
this_type = RL_TYPE (pp);
iso_level = RL_ISOLATE_LEVEL(pp);
ppp_prev = get_adjacent_run(pp, false, false);
ppp_next = get_adjacent_run(pp, true, false);
if (RL_LEVEL(ppp_prev) == RL_LEVEL(pp))
prev_type = RL_TYPE(ppp_prev);
else
prev_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_prev), RL_LEVEL(pp)));
if (RL_LEVEL(ppp_next) == RL_LEVEL(pp))
next_type = RL_TYPE(ppp_next);
else
next_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_next), RL_LEVEL(pp)));
if (FRIBIDI_IS_STRONG (prev_type))
last_strong_stack[iso_level] = prev_type;
/* W2 ??? */
/* W3: Change ALs to R. */
if (this_type == FRIBIDI_TYPE_AL)
{
RL_TYPE (pp) = FRIBIDI_TYPE_RTL;
w4 = true;
prev_type_orig = FRIBIDI_TYPE_ON;
continue;
}
/* W4. A single european separator changes to a european number.
A single common separator between two numbers of the same type
changes to that type. */
if (w4
&& RL_LEN (pp) == 1 && FRIBIDI_IS_ES_OR_CS (this_type)
&& FRIBIDI_IS_NUMBER (prev_type_orig)
&& prev_type_orig == next_type
&& (prev_type_orig == FRIBIDI_TYPE_EN
|| this_type == FRIBIDI_TYPE_CS))
{
RL_TYPE (pp) = prev_type;
this_type = RL_TYPE (pp);
}
w4 = true;
/* W5. A sequence of European terminators adjacent to European
numbers changes to All European numbers. */
if (this_type == FRIBIDI_TYPE_ET
&& (prev_type_orig == FRIBIDI_TYPE_EN
|| next_type == FRIBIDI_TYPE_EN))
{
RL_TYPE (pp) = FRIBIDI_TYPE_EN;
w4 = false;
this_type = RL_TYPE (pp);
}
/* W6. Otherwise change separators and terminators to other neutral. */
if (FRIBIDI_IS_NUMBER_SEPARATOR_OR_TERMINATOR (this_type))
RL_TYPE (pp) = FRIBIDI_TYPE_ON;
/* W7. Change european numbers to L. */
if (this_type == FRIBIDI_TYPE_EN && last_strong_stack[iso_level] == FRIBIDI_TYPE_LTR)
{
RL_TYPE (pp) = FRIBIDI_TYPE_LTR;
prev_type_orig = (RL_LEVEL (pp) == RL_LEVEL (pp->next) ?
FRIBIDI_TYPE_EN : FRIBIDI_TYPE_ON);
}
else
prev_type_orig = PREV_TYPE_OR_SOR (pp->next);
}
}
compact_neutrals (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* 5. Resolving Neutral Types */
DBG ("resolving neutral types - N0");
{
/* BD16 - Build list of all pairs*/
int num_iso_levels = max_iso_level + 1;
FriBidiPairingNode *pairing_nodes = NULL;
FriBidiRun *local_bracket_stack[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL][LOCAL_BRACKET_SIZE];
FriBidiRun **bracket_stack[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL];
int bracket_stack_size[FRIBIDI_BIDI_MAX_EXPLICIT_LEVEL];
int last_level = RL_LEVEL(main_run_list);
int last_iso_level = 0;
memset(bracket_stack, 0, sizeof(bracket_stack[0])*num_iso_levels);
memset(bracket_stack_size, 0, sizeof(bracket_stack_size[0])*num_iso_levels);
/* populate the bracket_size. The first LOCAL_BRACKET_SIZE entries
of the stack are one the stack. Allocate the rest of the entries.
*/
{
int iso_level;
for (iso_level=0; iso_level < LOCAL_BRACKET_SIZE; iso_level++)
bracket_stack[iso_level] = local_bracket_stack[iso_level];
for (iso_level=LOCAL_BRACKET_SIZE; iso_level < num_iso_levels; iso_level++)
bracket_stack[iso_level] = fribidi_malloc (sizeof (bracket_stack[0])
* FRIBIDI_BIDI_MAX_NESTED_BRACKET_PAIRS);
}
/* Build the bd16 pair stack. */
for_run_list (pp, main_run_list)
{
int level = RL_LEVEL(pp);
int iso_level = RL_ISOLATE_LEVEL(pp);
FriBidiBracketType brack_prop = RL_BRACKET_TYPE(pp);
/* Interpret the isolating run sequence as such that they
end at a change in the level, unless the iso_level has been
raised. */
if (level != last_level && last_iso_level == iso_level)
bracket_stack_size[last_iso_level] = 0;
if (brack_prop!= FRIBIDI_NO_BRACKET
&& RL_TYPE(pp)==FRIBIDI_TYPE_ON)
{
if (FRIBIDI_IS_BRACKET_OPEN(brack_prop))
{
if (bracket_stack_size[iso_level]==FRIBIDI_BIDI_MAX_NESTED_BRACKET_PAIRS)
break;
/* push onto the pair stack */
bracket_stack[iso_level][bracket_stack_size[iso_level]++] = pp;
}
else
{
int stack_idx = bracket_stack_size[iso_level] - 1;
while (stack_idx >= 0)
{
FriBidiBracketType se_brack_prop = RL_BRACKET_TYPE(bracket_stack[iso_level][stack_idx]);
if (FRIBIDI_BRACKET_ID(se_brack_prop) == FRIBIDI_BRACKET_ID(brack_prop))
{
bracket_stack_size[iso_level] = stack_idx;
pairing_nodes = pairing_nodes_push(pairing_nodes,
bracket_stack[iso_level][stack_idx],
pp);
break;
}
stack_idx--;
}
}
}
last_level = level;
last_iso_level = iso_level;
}
/* The list must now be sorted for the next algo to work! */
sort_pairing_nodes(&pairing_nodes);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_pairing_nodes (pairing_nodes);
}
# endif /* DEBUG */
/* Start the N0 */
{
FriBidiPairingNode *ppairs = pairing_nodes;
while (ppairs)
{
int iso_level = ppairs->open->isolate_level;
int embedding_level = base_level_per_iso_level[iso_level];
/* Find matching strong. */
fribidi_boolean found = false;
FriBidiRun *ppn;
for (ppn = ppairs->open; ppn!= ppairs->close; ppn = ppn->next)
{
FriBidiCharType this_type = RL_TYPE_AN_EN_AS_RTL(ppn);
/* Calculate level like in resolve implicit levels below to prevent
embedded levels not to match the base_level */
int this_level = RL_LEVEL (ppn) +
(FRIBIDI_LEVEL_IS_RTL (RL_LEVEL(ppn)) ^ FRIBIDI_DIR_TO_LEVEL (this_type));
/* N0b */
if (FRIBIDI_IS_STRONG (this_type) && this_level == embedding_level)
{
RL_TYPE(ppairs->open) = RL_TYPE(ppairs->close) = this_level%2 ? FRIBIDI_TYPE_RTL : FRIBIDI_TYPE_LTR;
found = true;
break;
}
}
/* N0c */
/* Search for any strong type preceding and within the bracket pair */
if (!found)
{
/* Search for a preceding strong */
int prec_strong_level = embedding_level; /* TBDov! Extract from Isolate level in effect */
int iso_level = RL_ISOLATE_LEVEL(ppairs->open);
for (ppn = ppairs->open->prev; ppn->type != FRIBIDI_TYPE_SENTINEL; ppn=ppn->prev)
{
FriBidiCharType this_type = RL_TYPE_AN_EN_AS_RTL(ppn);
if (FRIBIDI_IS_STRONG (this_type) && RL_ISOLATE_LEVEL(ppn) == iso_level)
{
prec_strong_level = RL_LEVEL (ppn) +
(FRIBIDI_LEVEL_IS_RTL (RL_LEVEL(ppn)) ^ FRIBIDI_DIR_TO_LEVEL (this_type));
break;
}
}
for (ppn = ppairs->open; ppn!= ppairs->close; ppn = ppn->next)
{
FriBidiCharType this_type = RL_TYPE_AN_EN_AS_RTL(ppn);
if (FRIBIDI_IS_STRONG (this_type) && RL_ISOLATE_LEVEL(ppn) == iso_level)
{
/* By constraint this is opposite the embedding direction,
since we did not match the N0b rule. We must now
compare with the preceding strong to establish whether
to apply N0c1 (opposite) or N0c2 embedding */
RL_TYPE(ppairs->open) = RL_TYPE(ppairs->close) = prec_strong_level % 2 ? FRIBIDI_TYPE_RTL : FRIBIDI_TYPE_LTR;
RL_LEVEL(ppairs->open) = RL_LEVEL(ppairs->close) = prec_strong_level;
found = true;
break;
}
}
}
ppairs = ppairs->next;
}
free_pairing_nodes(pairing_nodes);
if (num_iso_levels >= LOCAL_BRACKET_SIZE)
{
int i;
/* Only need to free the non static members */
for (i=LOCAL_BRACKET_SIZE; i<num_iso_levels; i++)
fribidi_free(bracket_stack[i]);
}
/* Remove the bracket property and re-compact */
{
const FriBidiBracketType NoBracket = FRIBIDI_NO_BRACKET;
for_run_list (pp, main_run_list)
pp->bracket_type = NoBracket;
compact_neutrals (main_run_list);
}
}
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
}
DBG ("resolving neutral types - N1+N2");
{
for_run_list (pp, main_run_list)
{
FriBidiCharType prev_type, this_type, next_type;
FriBidiRun *ppp_prev, *ppp_next;
ppp_prev = get_adjacent_run(pp, false, false);
ppp_next = get_adjacent_run(pp, true, false);
/* "European and Arabic numbers are treated as though they were R"
FRIBIDI_CHANGE_NUMBER_TO_RTL does this. */
this_type = FRIBIDI_CHANGE_NUMBER_TO_RTL (RL_TYPE (pp));
if (RL_LEVEL(ppp_prev) == RL_LEVEL(pp))
prev_type = FRIBIDI_CHANGE_NUMBER_TO_RTL (RL_TYPE(ppp_prev));
else
prev_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_prev), RL_LEVEL(pp)));
if (RL_LEVEL(ppp_next) == RL_LEVEL(pp))
next_type = FRIBIDI_CHANGE_NUMBER_TO_RTL (RL_TYPE(ppp_next));
else
next_type = FRIBIDI_LEVEL_TO_DIR(MAX(RL_LEVEL(ppp_next), RL_LEVEL(pp)));
if (FRIBIDI_IS_NEUTRAL (this_type))
RL_TYPE (pp) = (prev_type == next_type) ?
/* N1. */ prev_type :
/* N2. */ FRIBIDI_EMBEDDING_DIRECTION (pp);
}
}
compact_list (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* 6. Resolving implicit levels */
DBG ("resolving implicit levels");
{
max_level = base_level;
for_run_list (pp, main_run_list)
{
FriBidiCharType this_type;
int level;
this_type = RL_TYPE (pp);
level = RL_LEVEL (pp);
/* I1. Even */
/* I2. Odd */
if (FRIBIDI_IS_NUMBER (this_type))
RL_LEVEL (pp) = (level + 2) & ~1;
else
RL_LEVEL (pp) =
level +
(FRIBIDI_LEVEL_IS_RTL (level) ^ FRIBIDI_DIR_TO_LEVEL (this_type));
if (RL_LEVEL (pp) > max_level)
max_level = RL_LEVEL (pp);
}
}
compact_list (main_run_list);
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_bidi_string (bidi_types, len);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
/* Reinsert the explicit codes & BN's that are already removed, from the
explicits_list to main_run_list. */
DBG ("reinserting explicit codes");
if UNLIKELY
(explicits_list->next != explicits_list)
{
register FriBidiRun *p;
register fribidi_boolean stat =
shadow_run_list (main_run_list, explicits_list, true);
explicits_list = NULL;
if UNLIKELY
(!stat) goto out;
/* Set level of inserted explicit chars to that of their previous
* char, such that they do not affect reordering. */
p = main_run_list->next;
if (p != main_run_list && p->level == FRIBIDI_SENTINEL)
p->level = base_level;
for_run_list (p, main_run_list) if (p->level == FRIBIDI_SENTINEL)
p->level = p->prev->level;
}
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
DBG ("reset the embedding levels, 1, 2, 3.");
{
register int j, state, pos;
register FriBidiCharType char_type;
register FriBidiRun *p, *q, *list;
/* L1. Reset the embedding levels of some chars:
1. segment separators,
2. paragraph separators,
3. any sequence of whitespace characters preceding a segment
separator or paragraph separator, and
4. any sequence of whitespace characters and/or isolate formatting
characters at the end of the line.
... (to be continued in fribidi_reorder_line()). */
list = new_run_list ();
if UNLIKELY
(!list) goto out;
q = list;
state = 1;
pos = len - 1;
for (j = len - 1; j >= -1; j--)
{
/* close up the open link at the end */
if (j >= 0)
char_type = bidi_types[j];
else
char_type = FRIBIDI_TYPE_ON;
if (!state && FRIBIDI_IS_SEPARATOR (char_type))
{
state = 1;
pos = j;
}
else if (state &&
!(FRIBIDI_IS_EXPLICIT_OR_SEPARATOR_OR_BN_OR_WS(char_type)
|| FRIBIDI_IS_ISOLATE(char_type)))
{
state = 0;
p = new_run ();
if UNLIKELY
(!p)
{
free_run_list (list);
goto out;
}
p->pos = j + 1;
p->len = pos - j;
p->type = base_dir;
p->level = base_level;
move_node_before (p, q);
q = p;
}
}
if UNLIKELY
(!shadow_run_list (main_run_list, list, false)) goto out;
}
# if DEBUG
if UNLIKELY
(fribidi_debug_status ())
{
print_types_re (main_run_list);
print_resolved_levels (main_run_list);
print_resolved_types (main_run_list);
}
# endif /* DEBUG */
{
FriBidiStrIndex pos = 0;
for_run_list (pp, main_run_list)
{
register FriBidiStrIndex l;
register FriBidiLevel level = pp->level;
for (l = pp->len; l; l--)
embedding_levels[pos++] = level;
}
}
status = true;
out:
DBG ("leaving fribidi_get_par_embedding_levels");
if (main_run_list)
free_run_list (main_run_list);
if UNLIKELY
(explicits_list) free_run_list (explicits_list);
return status ? max_level + 1 : 0;
}
static void
bidi_string_reverse (
FriBidiChar *str,
const FriBidiStrIndex len
)
{
FriBidiStrIndex i;
fribidi_assert (str);
for (i = 0; i < len / 2; i++)
{
FriBidiChar tmp = str[i];
str[i] = str[len - 1 - i];
str[len - 1 - i] = tmp;
}
}
static void
index_array_reverse (
FriBidiStrIndex *arr,
const FriBidiStrIndex len
)
{
FriBidiStrIndex i;
fribidi_assert (arr);
for (i = 0; i < len / 2; i++)
{
FriBidiStrIndex tmp = arr[i];
arr[i] = arr[len - 1 - i];
arr[len - 1 - i] = tmp;
}
}
FRIBIDI_ENTRY FriBidiLevel
fribidi_reorder_line (
/* input */
FriBidiFlags flags, /* reorder flags */
const FriBidiCharType *bidi_types,
const FriBidiStrIndex len,
const FriBidiStrIndex off,
const FriBidiParType base_dir,
/* input and output */
FriBidiLevel *embedding_levels,
FriBidiChar *visual_str,
/* output */
FriBidiStrIndex *map
)
{
fribidi_boolean status = false;
FriBidiLevel max_level = 0;
if UNLIKELY
(len == 0)
{
status = true;
goto out;
}
DBG ("in fribidi_reorder_line");
fribidi_assert (bidi_types);
fribidi_assert (embedding_levels);
DBG ("reset the embedding levels, 4. whitespace at the end of line");
{
register FriBidiStrIndex i;
/* L1. Reset the embedding levels of some chars:
4. any sequence of white space characters at the end of the line. */
for (i = off + len - 1; i >= off &&
FRIBIDI_IS_EXPLICIT_OR_BN_OR_WS (bidi_types[i]); i--)
embedding_levels[i] = FRIBIDI_DIR_TO_LEVEL (base_dir);
}
/* 7. Reordering resolved levels */
{
register FriBidiLevel level;
register FriBidiStrIndex i;
/* Reorder both the outstring and the order array */
{
if (FRIBIDI_TEST_BITS (flags, FRIBIDI_FLAG_REORDER_NSM))
{
/* L3. Reorder NSMs. */
for (i = off + len - 1; i >= off; i--)
if (FRIBIDI_LEVEL_IS_RTL (embedding_levels[i])
&& bidi_types[i] == FRIBIDI_TYPE_NSM)
{
register FriBidiStrIndex seq_end = i;
level = embedding_levels[i];
for (i--; i >= off &&
FRIBIDI_IS_EXPLICIT_OR_BN_OR_NSM (bidi_types[i])
&& embedding_levels[i] == level; i--)
;
if (i < off || embedding_levels[i] != level)
{
i++;
DBG ("warning: NSM(s) at the beginning of level run");
}
if (visual_str)
{
bidi_string_reverse (visual_str + i, seq_end - i + 1);
}
if (map)
{
index_array_reverse (map + i, seq_end - i + 1);
}
}
}
/* Find max_level of the line. We don't reuse the paragraph
* max_level, both for a cleaner API, and that the line max_level
* may be far less than paragraph max_level. */
for (i = off + len - 1; i >= off; i--)
if (embedding_levels[i] > max_level)
max_level = embedding_levels[i];
/* L2. Reorder. */
for (level = max_level; level > 0; level--)
for (i = off + len - 1; i >= off; i--)
if (embedding_levels[i] >= level)
{
/* Find all stretches that are >= level_idx */
register FriBidiStrIndex seq_end = i;
for (i--; i >= off && embedding_levels[i] >= level; i--)
;
if (visual_str)
bidi_string_reverse (visual_str + i + 1, seq_end - i);
if (map)
index_array_reverse (map + i + 1, seq_end - i);
}
}
}
status = true;
out:
return status ? max_level + 1 : 0;
}
/* Editor directions:
* vim:textwidth=78:tabstop=8:shiftwidth=2:autoindent:cindent
*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_1199_0 |
crossvul-cpp_data_good_1321_0 | /* NetHack 3.6 files.c $NHDT-Date: 1576626110 2019/12/17 23:41:50 $ $NHDT-Branch: NetHack-3.6 $:$NHDT-Revision: 1.276 $ */
/* Copyright (c) Stichting Mathematisch Centrum, Amsterdam, 1985. */
/*-Copyright (c) Derek S. Ray, 2015. */
/* NetHack may be freely redistributed. See license for details. */
#define NEED_VARARGS
#include "hack.h"
#include "dlb.h"
#ifdef TTY_GRAPHICS
#include "wintty.h" /* more() */
#endif
#if (!defined(MAC) && !defined(O_WRONLY) && !defined(AZTEC_C)) \
|| defined(USE_FCNTL)
#include <fcntl.h>
#endif
#include <errno.h>
#ifdef _MSC_VER /* MSC 6.0 defines errno quite differently */
#if (_MSC_VER >= 600)
#define SKIP_ERRNO
#endif
#else
#ifdef NHSTDC
#define SKIP_ERRNO
#endif
#endif
#ifndef SKIP_ERRNO
#ifdef _DCC
const
#endif
extern int errno;
#endif
#ifdef ZLIB_COMP /* RLC 09 Mar 1999: Support internal ZLIB */
#include "zlib.h"
#ifndef COMPRESS_EXTENSION
#define COMPRESS_EXTENSION ".gz"
#endif
#endif
#if defined(UNIX) && defined(QT_GRAPHICS)
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#endif
#if defined(UNIX) || defined(VMS) || !defined(NO_SIGNAL)
#include <signal.h>
#endif
#if defined(MSDOS) || defined(OS2) || defined(TOS) || defined(WIN32)
#ifndef __DJGPP__
#include <sys\stat.h>
#else
#include <sys/stat.h>
#endif
#endif
#ifndef O_BINARY /* used for micros, no-op for others */
#define O_BINARY 0
#endif
#ifdef PREFIXES_IN_USE
#define FQN_NUMBUF 4
static char fqn_filename_buffer[FQN_NUMBUF][FQN_MAX_FILENAME];
#endif
#if !defined(MFLOPPY) && !defined(VMS) && !defined(WIN32)
char bones[] = "bonesnn.xxx";
char lock[PL_NSIZ + 14] = "1lock"; /* long enough for uid+name+.99 */
#else
#if defined(MFLOPPY)
char bones[FILENAME]; /* pathname of bones files */
char lock[FILENAME]; /* pathname of level files */
#endif
#if defined(VMS)
char bones[] = "bonesnn.xxx;1";
char lock[PL_NSIZ + 17] = "1lock"; /* long enough for _uid+name+.99;1 */
#endif
#if defined(WIN32)
char bones[] = "bonesnn.xxx";
char lock[PL_NSIZ + 25]; /* long enough for username+-+name+.99 */
#endif
#endif
#if defined(UNIX) || defined(__BEOS__)
#define SAVESIZE (PL_NSIZ + 13) /* save/99999player.e */
#else
#ifdef VMS
#define SAVESIZE (PL_NSIZ + 22) /* [.save]<uid>player.e;1 */
#else
#if defined(WIN32)
#define SAVESIZE (PL_NSIZ + 40) /* username-player.NetHack-saved-game */
#else
#define SAVESIZE FILENAME /* from macconf.h or pcconf.h */
#endif
#endif
#endif
#if !defined(SAVE_EXTENSION)
#ifdef MICRO
#define SAVE_EXTENSION ".sav"
#endif
#ifdef WIN32
#define SAVE_EXTENSION ".NetHack-saved-game"
#endif
#endif
char SAVEF[SAVESIZE]; /* holds relative path of save file from playground */
#ifdef MICRO
char SAVEP[SAVESIZE]; /* holds path of directory for save file */
#endif
#ifdef HOLD_LOCKFILE_OPEN
struct level_ftrack {
int init;
int fd; /* file descriptor for level file */
int oflag; /* open flags */
boolean nethack_thinks_it_is_open; /* Does NetHack think it's open? */
} lftrack;
#if defined(WIN32)
#include <share.h>
#endif
#endif /*HOLD_LOCKFILE_OPEN*/
#define WIZKIT_MAX 128
static char wizkit[WIZKIT_MAX];
STATIC_DCL FILE *NDECL(fopen_wizkit_file);
STATIC_DCL void FDECL(wizkit_addinv, (struct obj *));
#ifdef AMIGA
extern char PATH[]; /* see sys/amiga/amidos.c */
extern char bbs_id[];
static int lockptr;
#ifdef __SASC_60
#include <proto/dos.h>
#endif
#include <libraries/dos.h>
extern void FDECL(amii_set_text_font, (char *, int));
#endif
#if defined(WIN32) || defined(MSDOS)
static int lockptr;
#ifdef MSDOS
#define Delay(a) msleep(a)
#endif
#define Close close
#ifndef WIN_CE
#define DeleteFile unlink
#endif
#ifdef WIN32
/*from windmain.c */
extern char *FDECL(translate_path_variables, (const char *, char *));
#endif
#endif
#ifdef MAC
#undef unlink
#define unlink macunlink
#endif
#if (defined(macintosh) && (defined(__SC__) || defined(__MRC__))) \
|| defined(__MWERKS__)
#define PRAGMA_UNUSED
#endif
#ifdef USER_SOUNDS
extern char *sounddir;
#endif
extern int n_dgns; /* from dungeon.c */
#if defined(UNIX) && defined(QT_GRAPHICS)
#define SELECTSAVED
#endif
#ifdef SELECTSAVED
STATIC_PTR int FDECL(CFDECLSPEC strcmp_wrap, (const void *, const void *));
#endif
STATIC_DCL char *FDECL(set_bonesfile_name, (char *, d_level *));
STATIC_DCL char *NDECL(set_bonestemp_name);
#ifdef COMPRESS
STATIC_DCL void FDECL(redirect, (const char *, const char *, FILE *,
BOOLEAN_P));
#endif
#if defined(COMPRESS) || defined(ZLIB_COMP)
STATIC_DCL void FDECL(docompress_file, (const char *, BOOLEAN_P));
#endif
#if defined(ZLIB_COMP)
STATIC_DCL boolean FDECL(make_compressed_name, (const char *, char *));
#endif
#ifndef USE_FCNTL
STATIC_DCL char *FDECL(make_lockname, (const char *, char *));
#endif
STATIC_DCL void FDECL(set_configfile_name, (const char *));
STATIC_DCL FILE *FDECL(fopen_config_file, (const char *, int));
STATIC_DCL int FDECL(get_uchars, (char *, uchar *, BOOLEAN_P,
int, const char *));
boolean FDECL(proc_wizkit_line, (char *));
boolean FDECL(parse_config_line, (char *));
STATIC_DCL boolean FDECL(parse_conf_file, (FILE *, boolean (*proc)(char *)));
STATIC_DCL FILE *NDECL(fopen_sym_file);
boolean FDECL(proc_symset_line, (char *));
STATIC_DCL void FDECL(set_symhandling, (char *, int));
#ifdef NOCWD_ASSUMPTIONS
STATIC_DCL void FDECL(adjust_prefix, (char *, int));
#endif
STATIC_DCL boolean FDECL(config_error_nextline, (const char *));
STATIC_DCL void NDECL(free_config_sections);
STATIC_DCL char *FDECL(choose_random_part, (char *, CHAR_P));
STATIC_DCL boolean FDECL(is_config_section, (const char *));
STATIC_DCL boolean FDECL(handle_config_section, (char *));
#ifdef SELF_RECOVER
STATIC_DCL boolean FDECL(copy_bytes, (int, int));
#endif
#ifdef HOLD_LOCKFILE_OPEN
STATIC_DCL int FDECL(open_levelfile_exclusively, (const char *, int, int));
#endif
static char *config_section_chosen = (char *) 0;
static char *config_section_current = (char *) 0;
/*
* fname_encode()
*
* Args:
* legal zero-terminated list of acceptable file name characters
* quotechar lead-in character used to quote illegal characters as
* hex digits
* s string to encode
* callerbuf buffer to house result
* bufsz size of callerbuf
*
* Notes:
* The hex digits 0-9 and A-F are always part of the legal set due to
* their use in the encoding scheme, even if not explicitly included in
* 'legal'.
*
* Sample:
* The following call:
* (void)fname_encode("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
* '%', "This is a % test!", buf, 512);
* results in this encoding:
* "This%20is%20a%20%25%20test%21"
*/
char *
fname_encode(legal, quotechar, s, callerbuf, bufsz)
const char *legal;
char quotechar;
char *s, *callerbuf;
int bufsz;
{
char *sp, *op;
int cnt = 0;
static char hexdigits[] = "0123456789ABCDEF";
sp = s;
op = callerbuf;
*op = '\0';
while (*sp) {
/* Do we have room for one more character or encoding? */
if ((bufsz - cnt) <= 4)
return callerbuf;
if (*sp == quotechar) {
(void) sprintf(op, "%c%02X", quotechar, *sp);
op += 3;
cnt += 3;
} else if ((index(legal, *sp) != 0) || (index(hexdigits, *sp) != 0)) {
*op++ = *sp;
*op = '\0';
cnt++;
} else {
(void) sprintf(op, "%c%02X", quotechar, *sp);
op += 3;
cnt += 3;
}
sp++;
}
return callerbuf;
}
/*
* fname_decode()
*
* Args:
* quotechar lead-in character used to quote illegal characters as
* hex digits
* s string to decode
* callerbuf buffer to house result
* bufsz size of callerbuf
*/
char *
fname_decode(quotechar, s, callerbuf, bufsz)
char quotechar;
char *s, *callerbuf;
int bufsz;
{
char *sp, *op;
int k, calc, cnt = 0;
static char hexdigits[] = "0123456789ABCDEF";
sp = s;
op = callerbuf;
*op = '\0';
calc = 0;
while (*sp) {
/* Do we have room for one more character? */
if ((bufsz - cnt) <= 2)
return callerbuf;
if (*sp == quotechar) {
sp++;
for (k = 0; k < 16; ++k)
if (*sp == hexdigits[k])
break;
if (k >= 16)
return callerbuf; /* impossible, so bail */
calc = k << 4;
sp++;
for (k = 0; k < 16; ++k)
if (*sp == hexdigits[k])
break;
if (k >= 16)
return callerbuf; /* impossible, so bail */
calc += k;
sp++;
*op++ = calc;
*op = '\0';
} else {
*op++ = *sp++;
*op = '\0';
}
cnt++;
}
return callerbuf;
}
#ifdef PREFIXES_IN_USE
#define UNUSED_if_not_PREFIXES_IN_USE /*empty*/
#else
#define UNUSED_if_not_PREFIXES_IN_USE UNUSED
#endif
/*ARGSUSED*/
const char *
fqname(basenam, whichprefix, buffnum)
const char *basenam;
int whichprefix UNUSED_if_not_PREFIXES_IN_USE;
int buffnum UNUSED_if_not_PREFIXES_IN_USE;
{
#ifdef PREFIXES_IN_USE
char *bufptr;
#endif
#ifdef WIN32
char tmpbuf[BUFSZ];
#endif
#ifndef PREFIXES_IN_USE
return basenam;
#else
if (!basenam || whichprefix < 0 || whichprefix >= PREFIX_COUNT)
return basenam;
if (!fqn_prefix[whichprefix])
return basenam;
if (buffnum < 0 || buffnum >= FQN_NUMBUF) {
impossible("Invalid fqn_filename_buffer specified: %d", buffnum);
buffnum = 0;
}
bufptr = fqn_prefix[whichprefix];
#ifdef WIN32
if (strchr(fqn_prefix[whichprefix], '%')
|| strchr(fqn_prefix[whichprefix], '~'))
bufptr = translate_path_variables(fqn_prefix[whichprefix], tmpbuf);
#endif
if (strlen(bufptr) + strlen(basenam) >= FQN_MAX_FILENAME) {
impossible("fqname too long: %s + %s", bufptr, basenam);
return basenam; /* XXX */
}
Strcpy(fqn_filename_buffer[buffnum], bufptr);
return strcat(fqn_filename_buffer[buffnum], basenam);
#endif /* !PREFIXES_IN_USE */
}
int
validate_prefix_locations(reasonbuf)
char *reasonbuf; /* reasonbuf must be at least BUFSZ, supplied by caller */
{
#if defined(NOCWD_ASSUMPTIONS)
FILE *fp;
const char *filename;
int prefcnt, failcount = 0;
char panicbuf1[BUFSZ], panicbuf2[BUFSZ];
const char *details;
#endif
if (reasonbuf)
reasonbuf[0] = '\0';
#if defined(NOCWD_ASSUMPTIONS)
for (prefcnt = 1; prefcnt < PREFIX_COUNT; prefcnt++) {
/* don't test writing to configdir or datadir; they're readonly */
if (prefcnt == SYSCONFPREFIX || prefcnt == CONFIGPREFIX
|| prefcnt == DATAPREFIX)
continue;
filename = fqname("validate", prefcnt, 3);
if ((fp = fopen(filename, "w"))) {
fclose(fp);
(void) unlink(filename);
} else {
if (reasonbuf) {
if (failcount)
Strcat(reasonbuf, ", ");
Strcat(reasonbuf, fqn_prefix_names[prefcnt]);
}
/* the paniclog entry gets the value of errno as well */
Sprintf(panicbuf1, "Invalid %s", fqn_prefix_names[prefcnt]);
#if defined(NHSTDC) && !defined(NOTSTDC)
if (!(details = strerror(errno)))
#endif
details = "";
Sprintf(panicbuf2, "\"%s\", (%d) %s", fqn_prefix[prefcnt], errno,
details);
paniclog(panicbuf1, panicbuf2);
failcount++;
}
}
if (failcount)
return 0;
else
#endif
return 1;
}
/* fopen a file, with OS-dependent bells and whistles */
/* NOTE: a simpler version of this routine also exists in util/dlb_main.c */
FILE *
fopen_datafile(filename, mode, prefix)
const char *filename, *mode;
int prefix;
{
FILE *fp;
filename = fqname(filename, prefix, prefix == TROUBLEPREFIX ? 3 : 0);
fp = fopen(filename, mode);
return fp;
}
/* ---------- BEGIN LEVEL FILE HANDLING ----------- */
#ifdef MFLOPPY
/* Set names for bones[] and lock[] */
void
set_lock_and_bones()
{
if (!ramdisk) {
Strcpy(levels, permbones);
Strcpy(bones, permbones);
}
append_slash(permbones);
append_slash(levels);
#ifdef AMIGA
strncat(levels, bbs_id, PATHLEN);
#endif
append_slash(bones);
Strcat(bones, "bonesnn.*");
Strcpy(lock, levels);
#ifndef AMIGA
Strcat(lock, alllevels);
#endif
return;
}
#endif /* MFLOPPY */
/* Construct a file name for a level-type file, which is of the form
* something.level (with any old level stripped off).
* This assumes there is space on the end of 'file' to append
* a two digit number. This is true for 'level'
* but be careful if you use it for other things -dgk
*/
void
set_levelfile_name(file, lev)
char *file;
int lev;
{
char *tf;
tf = rindex(file, '.');
if (!tf)
tf = eos(file);
Sprintf(tf, ".%d", lev);
#ifdef VMS
Strcat(tf, ";1");
#endif
return;
}
int
create_levelfile(lev, errbuf)
int lev;
char errbuf[];
{
int fd;
const char *fq_lock;
if (errbuf)
*errbuf = '\0';
set_levelfile_name(lock, lev);
fq_lock = fqname(lock, LEVELPREFIX, 0);
#if defined(MICRO) || defined(WIN32)
/* Use O_TRUNC to force the file to be shortened if it already
* exists and is currently longer.
*/
#ifdef HOLD_LOCKFILE_OPEN
if (lev == 0)
fd = open_levelfile_exclusively(
fq_lock, lev, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY);
else
#endif
fd = open(fq_lock, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, FCMASK);
#else
#ifdef MAC
fd = maccreat(fq_lock, LEVL_TYPE);
#else
fd = creat(fq_lock, FCMASK);
#endif
#endif /* MICRO || WIN32 */
if (fd >= 0)
level_info[lev].flags |= LFILE_EXISTS;
else if (errbuf) /* failure explanation */
Sprintf(errbuf, "Cannot create file \"%s\" for level %d (errno %d).",
lock, lev, errno);
return fd;
}
int
open_levelfile(lev, errbuf)
int lev;
char errbuf[];
{
int fd;
const char *fq_lock;
if (errbuf)
*errbuf = '\0';
set_levelfile_name(lock, lev);
fq_lock = fqname(lock, LEVELPREFIX, 0);
#ifdef MFLOPPY
/* If not currently accessible, swap it in. */
if (level_info[lev].where != ACTIVE)
swapin_file(lev);
#endif
#ifdef MAC
fd = macopen(fq_lock, O_RDONLY | O_BINARY, LEVL_TYPE);
#else
#ifdef HOLD_LOCKFILE_OPEN
if (lev == 0)
fd = open_levelfile_exclusively(fq_lock, lev, O_RDONLY | O_BINARY);
else
#endif
fd = open(fq_lock, O_RDONLY | O_BINARY, 0);
#endif
/* for failure, return an explanation that our caller can use;
settle for `lock' instead of `fq_lock' because the latter
might end up being too big for nethack's BUFSZ */
if (fd < 0 && errbuf)
Sprintf(errbuf, "Cannot open file \"%s\" for level %d (errno %d).",
lock, lev, errno);
return fd;
}
void
delete_levelfile(lev)
int lev;
{
/*
* Level 0 might be created by port specific code that doesn't
* call create_levfile(), so always assume that it exists.
*/
if (lev == 0 || (level_info[lev].flags & LFILE_EXISTS)) {
set_levelfile_name(lock, lev);
#ifdef HOLD_LOCKFILE_OPEN
if (lev == 0)
really_close();
#endif
(void) unlink(fqname(lock, LEVELPREFIX, 0));
level_info[lev].flags &= ~LFILE_EXISTS;
}
}
void
clearlocks()
{
#ifdef HANGUPHANDLING
if (program_state.preserve_locks)
return;
#endif
#if !defined(PC_LOCKING) && defined(MFLOPPY) && !defined(AMIGA)
eraseall(levels, alllevels);
if (ramdisk)
eraseall(permbones, alllevels);
#else
{
register int x;
#ifndef NO_SIGNAL
(void) signal(SIGINT, SIG_IGN);
#endif
#if defined(UNIX) || defined(VMS)
sethanguphandler((void FDECL((*), (int) )) SIG_IGN);
#endif
/* can't access maxledgerno() before dungeons are created -dlc */
for (x = (n_dgns ? maxledgerno() : 0); x >= 0; x--)
delete_levelfile(x); /* not all levels need be present */
}
#endif /* ?PC_LOCKING,&c */
}
#if defined(SELECTSAVED)
/* qsort comparison routine */
STATIC_OVL int CFDECLSPEC
strcmp_wrap(p, q)
const void *p;
const void *q;
{
#if defined(UNIX) && defined(QT_GRAPHICS)
return strncasecmp(*(char **) p, *(char **) q, 16);
#else
return strncmpi(*(char **) p, *(char **) q, 16);
#endif
}
#endif
#ifdef HOLD_LOCKFILE_OPEN
STATIC_OVL int
open_levelfile_exclusively(name, lev, oflag)
const char *name;
int lev, oflag;
{
int reslt, fd;
if (!lftrack.init) {
lftrack.init = 1;
lftrack.fd = -1;
}
if (lftrack.fd >= 0) {
/* check for compatible access */
if (lftrack.oflag == oflag) {
fd = lftrack.fd;
reslt = lseek(fd, 0L, SEEK_SET);
if (reslt == -1L)
panic("open_levelfile_exclusively: lseek failed %d", errno);
lftrack.nethack_thinks_it_is_open = TRUE;
} else {
really_close();
fd = sopen(name, oflag, SH_DENYRW, FCMASK);
lftrack.fd = fd;
lftrack.oflag = oflag;
lftrack.nethack_thinks_it_is_open = TRUE;
}
} else {
fd = sopen(name, oflag, SH_DENYRW, FCMASK);
lftrack.fd = fd;
lftrack.oflag = oflag;
if (fd >= 0)
lftrack.nethack_thinks_it_is_open = TRUE;
}
return fd;
}
void
really_close()
{
int fd;
if (lftrack.init) {
fd = lftrack.fd;
lftrack.nethack_thinks_it_is_open = FALSE;
lftrack.fd = -1;
lftrack.oflag = 0;
if (fd != -1)
(void) close(fd);
}
return;
}
int
nhclose(fd)
int fd;
{
if (lftrack.fd == fd) {
really_close(); /* close it, but reopen it to hold it */
fd = open_levelfile(0, (char *) 0);
lftrack.nethack_thinks_it_is_open = FALSE;
return 0;
}
return close(fd);
}
#else /* !HOLD_LOCKFILE_OPEN */
int
nhclose(fd)
int fd;
{
return close(fd);
}
#endif /* ?HOLD_LOCKFILE_OPEN */
/* ---------- END LEVEL FILE HANDLING ----------- */
/* ---------- BEGIN BONES FILE HANDLING ----------- */
/* set up "file" to be file name for retrieving bones, and return a
* bonesid to be read/written in the bones file.
*/
STATIC_OVL char *
set_bonesfile_name(file, lev)
char *file;
d_level *lev;
{
s_level *sptr;
char *dptr;
/*
* "bonD0.nn" = bones for level nn in the main dungeon;
* "bonM0.T" = bones for Minetown;
* "bonQBar.n" = bones for level n in the Barbarian quest;
* "bon3D0.nn" = \
* "bon3M0.T" = > same as above, but for bones pool #3.
* "bon3QBar.n" = /
*
* Return value for content validation skips "bon" and the
* pool number (if present), making it feasible for the admin
* to manually move a bones file from one pool to another by
* renaming it.
*/
Strcpy(file, "bon");
#ifdef SYSCF
if (sysopt.bones_pools > 1) {
unsigned poolnum = min((unsigned) sysopt.bones_pools, 10);
poolnum = (unsigned) ubirthday % poolnum; /* 0..9 */
Sprintf(eos(file), "%u", poolnum);
}
#endif
dptr = eos(file); /* this used to be after the following Sprintf()
and the return value was (dptr - 2) */
/* when this naming scheme was adopted, 'filecode' was one letter;
3.3.0 turned it into a three letter string (via roles[] in role.c);
from that version through 3.6.0, 'dptr' pointed past the filecode
and the return value of (dptr - 2) was wrong for bones produced
in the quest branch, skipping the boneid character 'Q' and the
first letter of the role's filecode; bones loading still worked
because the bonesid used for validation had the same error */
Sprintf(dptr, "%c%s", dungeons[lev->dnum].boneid,
In_quest(lev) ? urole.filecode : "0");
if ((sptr = Is_special(lev)) != 0)
Sprintf(eos(dptr), ".%c", sptr->boneid);
else
Sprintf(eos(dptr), ".%d", lev->dlevel);
#ifdef VMS
Strcat(dptr, ";1");
#endif
return dptr;
}
/* set up temporary file name for writing bones, to avoid another game's
* trying to read from an uncompleted bones file. we want an uncontentious
* name, so use one in the namespace reserved for this game's level files.
* (we are not reading or writing level files while writing bones files, so
* the same array may be used instead of copying.)
*/
STATIC_OVL char *
set_bonestemp_name()
{
char *tf;
tf = rindex(lock, '.');
if (!tf)
tf = eos(lock);
Sprintf(tf, ".bn");
#ifdef VMS
Strcat(tf, ";1");
#endif
return lock;
}
int
create_bonesfile(lev, bonesid, errbuf)
d_level *lev;
char **bonesid;
char errbuf[];
{
const char *file;
int fd;
if (errbuf)
*errbuf = '\0';
*bonesid = set_bonesfile_name(bones, lev);
file = set_bonestemp_name();
file = fqname(file, BONESPREFIX, 0);
#if defined(MICRO) || defined(WIN32)
/* Use O_TRUNC to force the file to be shortened if it already
* exists and is currently longer.
*/
fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, FCMASK);
#else
#ifdef MAC
fd = maccreat(file, BONE_TYPE);
#else
fd = creat(file, FCMASK);
#endif
#endif
if (fd < 0 && errbuf) /* failure explanation */
Sprintf(errbuf, "Cannot create bones \"%s\", id %s (errno %d).", lock,
*bonesid, errno);
#if defined(VMS) && !defined(SECURE)
/*
Re-protect bones file with world:read+write+execute+delete access.
umask() doesn't seem very reliable; also, vaxcrtl won't let us set
delete access without write access, which is what's really wanted.
Can't simply create it with the desired protection because creat
ANDs the mask with the user's default protection, which usually
denies some or all access to world.
*/
(void) chmod(file, FCMASK | 007); /* allow other users full access */
#endif /* VMS && !SECURE */
return fd;
}
#ifdef MFLOPPY
/* remove partial bonesfile in process of creation */
void
cancel_bonesfile()
{
const char *tempname;
tempname = set_bonestemp_name();
tempname = fqname(tempname, BONESPREFIX, 0);
(void) unlink(tempname);
}
#endif /* MFLOPPY */
/* move completed bones file to proper name */
void
commit_bonesfile(lev)
d_level *lev;
{
const char *fq_bones, *tempname;
int ret;
(void) set_bonesfile_name(bones, lev);
fq_bones = fqname(bones, BONESPREFIX, 0);
tempname = set_bonestemp_name();
tempname = fqname(tempname, BONESPREFIX, 1);
#if (defined(SYSV) && !defined(SVR4)) || defined(GENIX)
/* old SYSVs don't have rename. Some SVR3's may, but since they
* also have link/unlink, it doesn't matter. :-)
*/
(void) unlink(fq_bones);
ret = link(tempname, fq_bones);
ret += unlink(tempname);
#else
ret = rename(tempname, fq_bones);
#endif
if (wizard && ret != 0)
pline("couldn't rename %s to %s.", tempname, fq_bones);
}
int
open_bonesfile(lev, bonesid)
d_level *lev;
char **bonesid;
{
const char *fq_bones;
int fd;
*bonesid = set_bonesfile_name(bones, lev);
fq_bones = fqname(bones, BONESPREFIX, 0);
nh_uncompress(fq_bones); /* no effect if nonexistent */
#ifdef MAC
fd = macopen(fq_bones, O_RDONLY | O_BINARY, BONE_TYPE);
#else
fd = open(fq_bones, O_RDONLY | O_BINARY, 0);
#endif
return fd;
}
int
delete_bonesfile(lev)
d_level *lev;
{
(void) set_bonesfile_name(bones, lev);
return !(unlink(fqname(bones, BONESPREFIX, 0)) < 0);
}
/* assume we're compressing the recently read or created bonesfile, so the
* file name is already set properly */
void
compress_bonesfile()
{
nh_compress(fqname(bones, BONESPREFIX, 0));
}
/* ---------- END BONES FILE HANDLING ----------- */
/* ---------- BEGIN SAVE FILE HANDLING ----------- */
/* set savefile name in OS-dependent manner from pre-existing plname,
* avoiding troublesome characters */
void
set_savefile_name(regularize_it)
boolean regularize_it;
{
#ifdef VMS
Sprintf(SAVEF, "[.save]%d%s", getuid(), plname);
if (regularize_it)
regularize(SAVEF + 7);
Strcat(SAVEF, ";1");
#else
#if defined(MICRO)
Strcpy(SAVEF, SAVEP);
#ifdef AMIGA
strncat(SAVEF, bbs_id, PATHLEN);
#endif
{
int i = strlen(SAVEP);
#ifdef AMIGA
/* plname has to share space with SAVEP and ".sav" */
(void) strncat(SAVEF, plname, FILENAME - i - 4);
#else
(void) strncat(SAVEF, plname, 8);
#endif
if (regularize_it)
regularize(SAVEF + i);
}
Strcat(SAVEF, SAVE_EXTENSION);
#else
#if defined(WIN32)
{
static const char okchars[] =
"*ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_-.";
const char *legal = okchars;
char fnamebuf[BUFSZ], encodedfnamebuf[BUFSZ];
/* Obtain the name of the logged on user and incorporate
* it into the name. */
Sprintf(fnamebuf, "%s", plname);
if (regularize_it)
++legal; /* skip '*' wildcard character */
(void) fname_encode(legal, '%', fnamebuf, encodedfnamebuf, BUFSZ);
Sprintf(SAVEF, "%s%s", encodedfnamebuf, SAVE_EXTENSION);
}
#else /* not VMS or MICRO or WIN32 */
Sprintf(SAVEF, "save/%d%s", (int) getuid(), plname);
if (regularize_it)
regularize(SAVEF + 5); /* avoid . or / in name */
#endif /* WIN32 */
#endif /* MICRO */
#endif /* VMS */
}
#ifdef INSURANCE
void
save_savefile_name(fd)
int fd;
{
(void) write(fd, (genericptr_t) SAVEF, sizeof(SAVEF));
}
#endif
#ifndef MICRO
/* change pre-existing savefile name to indicate an error savefile */
void
set_error_savefile()
{
#ifdef VMS
{
char *semi_colon = rindex(SAVEF, ';');
if (semi_colon)
*semi_colon = '\0';
}
Strcat(SAVEF, ".e;1");
#else
#ifdef MAC
Strcat(SAVEF, "-e");
#else
Strcat(SAVEF, ".e");
#endif
#endif
}
#endif
/* create save file, overwriting one if it already exists */
int
create_savefile()
{
const char *fq_save;
int fd;
fq_save = fqname(SAVEF, SAVEPREFIX, 0);
#if defined(MICRO) || defined(WIN32)
fd = open(fq_save, O_WRONLY | O_BINARY | O_CREAT | O_TRUNC, FCMASK);
#else
#ifdef MAC
fd = maccreat(fq_save, SAVE_TYPE);
#else
fd = creat(fq_save, FCMASK);
#endif
#if defined(VMS) && !defined(SECURE)
/*
Make sure the save file is owned by the current process. That's
the default for non-privileged users, but for priv'd users the
file will be owned by the directory's owner instead of the user.
*/
#undef getuid
(void) chown(fq_save, getuid(), getgid());
#define getuid() vms_getuid()
#endif /* VMS && !SECURE */
#endif /* MICRO */
return fd;
}
/* open savefile for reading */
int
open_savefile()
{
const char *fq_save;
int fd;
fq_save = fqname(SAVEF, SAVEPREFIX, 0);
#ifdef MAC
fd = macopen(fq_save, O_RDONLY | O_BINARY, SAVE_TYPE);
#else
fd = open(fq_save, O_RDONLY | O_BINARY, 0);
#endif
return fd;
}
/* delete savefile */
int
delete_savefile()
{
(void) unlink(fqname(SAVEF, SAVEPREFIX, 0));
return 0; /* for restore_saved_game() (ex-xxxmain.c) test */
}
/* try to open up a save file and prepare to restore it */
int
restore_saved_game()
{
const char *fq_save;
int fd;
reset_restpref();
set_savefile_name(TRUE);
#ifdef MFLOPPY
if (!saveDiskPrompt(1))
return -1;
#endif /* MFLOPPY */
fq_save = fqname(SAVEF, SAVEPREFIX, 0);
nh_uncompress(fq_save);
if ((fd = open_savefile()) < 0)
return fd;
if (validate(fd, fq_save) != 0) {
(void) nhclose(fd), fd = -1;
(void) delete_savefile();
}
return fd;
}
#if defined(SELECTSAVED)
char *
plname_from_file(filename)
const char *filename;
{
int fd;
char *result = 0;
Strcpy(SAVEF, filename);
#ifdef COMPRESS_EXTENSION
SAVEF[strlen(SAVEF) - strlen(COMPRESS_EXTENSION)] = '\0';
#endif
nh_uncompress(SAVEF);
if ((fd = open_savefile()) >= 0) {
if (validate(fd, filename) == 0) {
char tplname[PL_NSIZ];
get_plname_from_file(fd, tplname);
result = dupstr(tplname);
}
(void) nhclose(fd);
}
nh_compress(SAVEF);
return result;
#if 0
/* --------- obsolete - used to be ifndef STORE_PLNAME_IN_FILE ----*/
#if defined(UNIX) && defined(QT_GRAPHICS)
/* Name not stored in save file, so we have to extract it from
the filename, which loses information
(eg. "/", "_", and "." characters are lost. */
int k;
int uid;
char name[64]; /* more than PL_NSIZ */
#ifdef COMPRESS_EXTENSION
#define EXTSTR COMPRESS_EXTENSION
#else
#define EXTSTR ""
#endif
if ( sscanf( filename, "%*[^/]/%d%63[^.]" EXTSTR, &uid, name ) == 2 ) {
#undef EXTSTR
/* "_" most likely means " ", which certainly looks nicer */
for (k=0; name[k]; k++)
if ( name[k] == '_' )
name[k] = ' ';
return dupstr(name);
} else
#endif /* UNIX && QT_GRAPHICS */
{
return 0;
}
/* --------- end of obsolete code ----*/
#endif /* 0 - WAS STORE_PLNAME_IN_FILE*/
}
#endif /* defined(SELECTSAVED) */
char **
get_saved_games()
{
#if defined(SELECTSAVED)
int n, j = 0;
char **result = 0;
#ifdef WIN32
{
char *foundfile;
const char *fq_save;
const char *fq_new_save;
const char *fq_old_save;
char **files = 0;
int i;
Strcpy(plname, "*");
set_savefile_name(FALSE);
#if defined(ZLIB_COMP)
Strcat(SAVEF, COMPRESS_EXTENSION);
#endif
fq_save = fqname(SAVEF, SAVEPREFIX, 0);
n = 0;
foundfile = foundfile_buffer();
if (findfirst((char *) fq_save)) {
do {
++n;
} while (findnext());
}
if (n > 0) {
files = (char **) alloc((n + 1) * sizeof(char *)); /* at most */
(void) memset((genericptr_t) files, 0, (n + 1) * sizeof(char *));
if (findfirst((char *) fq_save)) {
i = 0;
do {
files[i++] = strdup(foundfile);
} while (findnext());
}
}
if (n > 0) {
result = (char **) alloc((n + 1) * sizeof(char *)); /* at most */
(void) memset((genericptr_t) result, 0, (n + 1) * sizeof(char *));
for(i = 0; i < n; i++) {
char *r;
r = plname_from_file(files[i]);
if (r) {
/* rename file if it is not named as expected */
Strcpy(plname, r);
set_savefile_name(FALSE);
fq_new_save = fqname(SAVEF, SAVEPREFIX, 0);
fq_old_save = fqname(files[i], SAVEPREFIX, 1);
if(strcmp(fq_old_save, fq_new_save) != 0 &&
!file_exists(fq_new_save))
rename(fq_old_save, fq_new_save);
result[j++] = r;
}
}
}
free_saved_games(files);
}
#endif
#if defined(UNIX) && defined(QT_GRAPHICS)
/* posixly correct version */
int myuid = getuid();
DIR *dir;
if ((dir = opendir(fqname("save", SAVEPREFIX, 0)))) {
for (n = 0; readdir(dir); n++)
;
closedir(dir);
if (n > 0) {
int i;
if (!(dir = opendir(fqname("save", SAVEPREFIX, 0))))
return 0;
result = (char **) alloc((n + 1) * sizeof(char *)); /* at most */
(void) memset((genericptr_t) result, 0, (n + 1) * sizeof(char *));
for (i = 0, j = 0; i < n; i++) {
int uid;
char name[64]; /* more than PL_NSIZ */
struct dirent *entry = readdir(dir);
if (!entry)
break;
if (sscanf(entry->d_name, "%d%63s", &uid, name) == 2) {
if (uid == myuid) {
char filename[BUFSZ];
char *r;
Sprintf(filename, "save/%d%s", uid, name);
r = plname_from_file(filename);
if (r)
result[j++] = r;
}
}
}
closedir(dir);
}
}
#endif
#ifdef VMS
Strcpy(plname, "*");
set_savefile_name(FALSE);
j = vms_get_saved_games(SAVEF, &result);
#endif /* VMS */
if (j > 0) {
if (j > 1)
qsort(result, j, sizeof (char *), strcmp_wrap);
result[j] = 0;
return result;
} else if (result) { /* could happen if save files are obsolete */
free_saved_games(result);
}
#endif /* SELECTSAVED */
return 0;
}
void
free_saved_games(saved)
char **saved;
{
if (saved) {
int i = 0;
while (saved[i])
free((genericptr_t) saved[i++]);
free((genericptr_t) saved);
}
}
/* ---------- END SAVE FILE HANDLING ----------- */
/* ---------- BEGIN FILE COMPRESSION HANDLING ----------- */
#ifdef COMPRESS
STATIC_OVL void
redirect(filename, mode, stream, uncomp)
const char *filename, *mode;
FILE *stream;
boolean uncomp;
{
if (freopen(filename, mode, stream) == (FILE *) 0) {
(void) fprintf(stderr, "freopen of %s for %scompress failed\n",
filename, uncomp ? "un" : "");
nh_terminate(EXIT_FAILURE);
}
}
/*
* using system() is simpler, but opens up security holes and causes
* problems on at least Interactive UNIX 3.0.1 (SVR3.2), where any
* setuid is renounced by /bin/sh, so the files cannot be accessed.
*
* cf. child() in unixunix.c.
*/
STATIC_OVL void
docompress_file(filename, uncomp)
const char *filename;
boolean uncomp;
{
char cfn[80];
FILE *cf;
const char *args[10];
#ifdef COMPRESS_OPTIONS
char opts[80];
#endif
int i = 0;
int f;
#ifdef TTY_GRAPHICS
boolean istty = WINDOWPORT("tty");
#endif
Strcpy(cfn, filename);
#ifdef COMPRESS_EXTENSION
Strcat(cfn, COMPRESS_EXTENSION);
#endif
/* when compressing, we know the file exists */
if (uncomp) {
if ((cf = fopen(cfn, RDBMODE)) == (FILE *) 0)
return;
(void) fclose(cf);
}
args[0] = COMPRESS;
if (uncomp)
args[++i] = "-d"; /* uncompress */
#ifdef COMPRESS_OPTIONS
{
/* we can't guarantee there's only one additional option, sigh */
char *opt;
boolean inword = FALSE;
Strcpy(opts, COMPRESS_OPTIONS);
opt = opts;
while (*opt) {
if ((*opt == ' ') || (*opt == '\t')) {
if (inword) {
*opt = '\0';
inword = FALSE;
}
} else if (!inword) {
args[++i] = opt;
inword = TRUE;
}
opt++;
}
}
#endif
args[++i] = (char *) 0;
#ifdef TTY_GRAPHICS
/* If we don't do this and we are right after a y/n question *and*
* there is an error message from the compression, the 'y' or 'n' can
* end up being displayed after the error message.
*/
if (istty)
mark_synch();
#endif
f = fork();
if (f == 0) { /* child */
#ifdef TTY_GRAPHICS
/* any error messages from the compression must come out after
* the first line, because the more() to let the user read
* them will have to clear the first line. This should be
* invisible if there are no error messages.
*/
if (istty)
raw_print("");
#endif
/* run compressor without privileges, in case other programs
* have surprises along the line of gzip once taking filenames
* in GZIP.
*/
/* assume all compressors will compress stdin to stdout
* without explicit filenames. this is true of at least
* compress and gzip, those mentioned in config.h.
*/
if (uncomp) {
redirect(cfn, RDBMODE, stdin, uncomp);
redirect(filename, WRBMODE, stdout, uncomp);
} else {
redirect(filename, RDBMODE, stdin, uncomp);
redirect(cfn, WRBMODE, stdout, uncomp);
}
(void) setgid(getgid());
(void) setuid(getuid());
(void) execv(args[0], (char *const *) args);
perror((char *) 0);
(void) fprintf(stderr, "Exec to %scompress %s failed.\n",
uncomp ? "un" : "", filename);
nh_terminate(EXIT_FAILURE);
} else if (f == -1) {
perror((char *) 0);
pline("Fork to %scompress %s failed.", uncomp ? "un" : "", filename);
return;
}
#ifndef NO_SIGNAL
(void) signal(SIGINT, SIG_IGN);
(void) signal(SIGQUIT, SIG_IGN);
(void) wait((int *) &i);
(void) signal(SIGINT, (SIG_RET_TYPE) done1);
if (wizard)
(void) signal(SIGQUIT, SIG_DFL);
#else
/* I don't think we can really cope with external compression
* without signals, so we'll declare that compress failed and
* go on. (We could do a better job by forcing off external
* compression if there are no signals, but we want this for
* testing with FailSafeC
*/
i = 1;
#endif
if (i == 0) {
/* (un)compress succeeded: remove file left behind */
if (uncomp)
(void) unlink(cfn);
else
(void) unlink(filename);
} else {
/* (un)compress failed; remove the new, bad file */
if (uncomp) {
raw_printf("Unable to uncompress %s", filename);
(void) unlink(filename);
} else {
/* no message needed for compress case; life will go on */
(void) unlink(cfn);
}
#ifdef TTY_GRAPHICS
/* Give them a chance to read any error messages from the
* compression--these would go to stdout or stderr and would get
* overwritten only in tty mode. It's still ugly, since the
* messages are being written on top of the screen, but at least
* the user can read them.
*/
if (istty && iflags.window_inited) {
clear_nhwindow(WIN_MESSAGE);
more();
/* No way to know if this is feasible */
/* doredraw(); */
}
#endif
}
}
#endif /* COMPRESS */
#if defined(COMPRESS) || defined(ZLIB_COMP)
#define UNUSED_if_not_COMPRESS /*empty*/
#else
#define UNUSED_if_not_COMPRESS UNUSED
#endif
/* compress file */
void
nh_compress(filename)
const char *filename UNUSED_if_not_COMPRESS;
{
#if !defined(COMPRESS) && !defined(ZLIB_COMP)
#ifdef PRAGMA_UNUSED
#pragma unused(filename)
#endif
#else
docompress_file(filename, FALSE);
#endif
}
/* uncompress file if it exists */
void
nh_uncompress(filename)
const char *filename UNUSED_if_not_COMPRESS;
{
#if !defined(COMPRESS) && !defined(ZLIB_COMP)
#ifdef PRAGMA_UNUSED
#pragma unused(filename)
#endif
#else
docompress_file(filename, TRUE);
#endif
}
#ifdef ZLIB_COMP /* RLC 09 Mar 1999: Support internal ZLIB */
STATIC_OVL boolean
make_compressed_name(filename, cfn)
const char *filename;
char *cfn;
{
#ifndef SHORT_FILENAMES
/* Assume free-form filename with no 8.3 restrictions */
strcpy(cfn, filename);
strcat(cfn, COMPRESS_EXTENSION);
return TRUE;
#else
#ifdef SAVE_EXTENSION
char *bp = (char *) 0;
strcpy(cfn, filename);
if ((bp = strstri(cfn, SAVE_EXTENSION))) {
strsubst(bp, SAVE_EXTENSION, ".saz");
return TRUE;
} else {
/* find last occurrence of bon */
bp = eos(cfn);
while (bp-- > cfn) {
if (strstri(bp, "bon")) {
strsubst(bp, "bon", "boz");
return TRUE;
}
}
}
#endif /* SAVE_EXTENSION */
return FALSE;
#endif /* SHORT_FILENAMES */
}
STATIC_OVL void
docompress_file(filename, uncomp)
const char *filename;
boolean uncomp;
{
gzFile compressedfile;
FILE *uncompressedfile;
char cfn[256];
char buf[1024];
unsigned len, len2;
if (!make_compressed_name(filename, cfn))
return;
if (!uncomp) {
/* Open the input and output files */
/* Note that gzopen takes "wb" as its mode, even on systems where
fopen takes "r" and "w" */
uncompressedfile = fopen(filename, RDBMODE);
if (!uncompressedfile) {
pline("Error in zlib docompress_file %s", filename);
return;
}
compressedfile = gzopen(cfn, "wb");
if (compressedfile == NULL) {
if (errno == 0) {
pline("zlib failed to allocate memory");
} else {
panic("Error in docompress_file %d", errno);
}
fclose(uncompressedfile);
return;
}
/* Copy from the uncompressed to the compressed file */
while (1) {
len = fread(buf, 1, sizeof(buf), uncompressedfile);
if (ferror(uncompressedfile)) {
pline("Failure reading uncompressed file");
pline("Can't compress %s.", filename);
fclose(uncompressedfile);
gzclose(compressedfile);
(void) unlink(cfn);
return;
}
if (len == 0)
break; /* End of file */
len2 = gzwrite(compressedfile, buf, len);
if (len2 == 0) {
pline("Failure writing compressed file");
pline("Can't compress %s.", filename);
fclose(uncompressedfile);
gzclose(compressedfile);
(void) unlink(cfn);
return;
}
}
fclose(uncompressedfile);
gzclose(compressedfile);
/* Delete the file left behind */
(void) unlink(filename);
} else { /* uncomp */
/* Open the input and output files */
/* Note that gzopen takes "rb" as its mode, even on systems where
fopen takes "r" and "w" */
compressedfile = gzopen(cfn, "rb");
if (compressedfile == NULL) {
if (errno == 0) {
pline("zlib failed to allocate memory");
} else if (errno != ENOENT) {
panic("Error in zlib docompress_file %s, %d", filename,
errno);
}
return;
}
uncompressedfile = fopen(filename, WRBMODE);
if (!uncompressedfile) {
pline("Error in zlib docompress file uncompress %s", filename);
gzclose(compressedfile);
return;
}
/* Copy from the compressed to the uncompressed file */
while (1) {
len = gzread(compressedfile, buf, sizeof(buf));
if (len == (unsigned) -1) {
pline("Failure reading compressed file");
pline("Can't uncompress %s.", filename);
fclose(uncompressedfile);
gzclose(compressedfile);
(void) unlink(filename);
return;
}
if (len == 0)
break; /* End of file */
fwrite(buf, 1, len, uncompressedfile);
if (ferror(uncompressedfile)) {
pline("Failure writing uncompressed file");
pline("Can't uncompress %s.", filename);
fclose(uncompressedfile);
gzclose(compressedfile);
(void) unlink(filename);
return;
}
}
fclose(uncompressedfile);
gzclose(compressedfile);
/* Delete the file left behind */
(void) unlink(cfn);
}
}
#endif /* RLC 09 Mar 1999: End ZLIB patch */
/* ---------- END FILE COMPRESSION HANDLING ----------- */
/* ---------- BEGIN FILE LOCKING HANDLING ----------- */
static int nesting = 0;
#if defined(NO_FILE_LINKS) || defined(USE_FCNTL) /* implies UNIX */
static int lockfd = -1; /* for lock_file() to pass to unlock_file() */
#endif
#ifdef USE_FCNTL
struct flock sflock; /* for unlocking, same as above */
#endif
#define HUP if (!program_state.done_hup)
#ifndef USE_FCNTL
STATIC_OVL char *
make_lockname(filename, lockname)
const char *filename;
char *lockname;
{
#if defined(UNIX) || defined(VMS) || defined(AMIGA) || defined(WIN32) \
|| defined(MSDOS)
#ifdef NO_FILE_LINKS
Strcpy(lockname, LOCKDIR);
Strcat(lockname, "/");
Strcat(lockname, filename);
#else
Strcpy(lockname, filename);
#endif
#ifdef VMS
{
char *semi_colon = rindex(lockname, ';');
if (semi_colon)
*semi_colon = '\0';
}
Strcat(lockname, ".lock;1");
#else
Strcat(lockname, "_lock");
#endif
return lockname;
#else /* !(UNIX || VMS || AMIGA || WIN32 || MSDOS) */
#ifdef PRAGMA_UNUSED
#pragma unused(filename)
#endif
lockname[0] = '\0';
return (char *) 0;
#endif
}
#endif /* !USE_FCNTL */
/* lock a file */
boolean
lock_file(filename, whichprefix, retryct)
const char *filename;
int whichprefix;
int retryct;
{
#if defined(PRAGMA_UNUSED) && !(defined(UNIX) || defined(VMS)) \
&& !(defined(AMIGA) || defined(WIN32) || defined(MSDOS))
#pragma unused(retryct)
#endif
#ifndef USE_FCNTL
char locknambuf[BUFSZ];
const char *lockname;
#endif
nesting++;
if (nesting > 1) {
impossible("TRIED TO NEST LOCKS");
return TRUE;
}
#ifndef USE_FCNTL
lockname = make_lockname(filename, locknambuf);
#ifndef NO_FILE_LINKS /* LOCKDIR should be subsumed by LOCKPREFIX */
lockname = fqname(lockname, LOCKPREFIX, 2);
#endif
#endif
filename = fqname(filename, whichprefix, 0);
#ifdef USE_FCNTL
lockfd = open(filename, O_RDWR);
if (lockfd == -1) {
HUP raw_printf("Cannot open file %s. Is NetHack installed correctly?",
filename);
nesting--;
return FALSE;
}
sflock.l_type = F_WRLCK;
sflock.l_whence = SEEK_SET;
sflock.l_start = 0;
sflock.l_len = 0;
#endif
#if defined(UNIX) || defined(VMS)
#ifdef USE_FCNTL
while (fcntl(lockfd, F_SETLK, &sflock) == -1) {
#else
#ifdef NO_FILE_LINKS
while ((lockfd = open(lockname, O_RDWR | O_CREAT | O_EXCL, 0666)) == -1) {
#else
while (link(filename, lockname) == -1) {
#endif
#endif
#ifdef USE_FCNTL
if (retryct--) {
HUP raw_printf(
"Waiting for release of fcntl lock on %s. (%d retries left.)",
filename, retryct);
sleep(1);
} else {
HUP(void) raw_print("I give up. Sorry.");
HUP raw_printf("Some other process has an unnatural grip on %s.",
filename);
nesting--;
return FALSE;
}
#else
int errnosv = errno;
switch (errnosv) { /* George Barbanis */
case EEXIST:
if (retryct--) {
HUP raw_printf(
"Waiting for access to %s. (%d retries left).", filename,
retryct);
#if defined(SYSV) || defined(ULTRIX) || defined(VMS)
(void)
#endif
sleep(1);
} else {
HUP(void) raw_print("I give up. Sorry.");
HUP raw_printf("Perhaps there is an old %s around?",
lockname);
nesting--;
return FALSE;
}
break;
case ENOENT:
HUP raw_printf("Can't find file %s to lock!", filename);
nesting--;
return FALSE;
case EACCES:
HUP raw_printf("No write permission to lock %s!", filename);
nesting--;
return FALSE;
#ifdef VMS /* c__translate(vmsfiles.c) */
case EPERM:
/* could be misleading, but usually right */
HUP raw_printf("Can't lock %s due to directory protection.",
filename);
nesting--;
return FALSE;
#endif
case EROFS:
/* take a wild guess at the underlying cause */
HUP perror(lockname);
HUP raw_printf("Cannot lock %s.", filename);
HUP raw_printf(
"(Perhaps you are running NetHack from inside the distribution package?).");
nesting--;
return FALSE;
default:
HUP perror(lockname);
HUP raw_printf("Cannot lock %s for unknown reason (%d).",
filename, errnosv);
nesting--;
return FALSE;
}
#endif /* USE_FCNTL */
}
#endif /* UNIX || VMS */
#if (defined(AMIGA) || defined(WIN32) || defined(MSDOS)) \
&& !defined(USE_FCNTL)
#ifdef AMIGA
#define OPENFAILURE(fd) (!fd)
lockptr = 0;
#else
#define OPENFAILURE(fd) (fd < 0)
lockptr = -1;
#endif
while (--retryct && OPENFAILURE(lockptr)) {
#if defined(WIN32) && !defined(WIN_CE)
lockptr = sopen(lockname, O_RDWR | O_CREAT, SH_DENYRW, S_IWRITE);
#else
(void) DeleteFile(lockname); /* in case dead process was here first */
#ifdef AMIGA
lockptr = Open(lockname, MODE_NEWFILE);
#else
lockptr = open(lockname, O_RDWR | O_CREAT | O_EXCL, S_IWRITE);
#endif
#endif
if (OPENFAILURE(lockptr)) {
raw_printf("Waiting for access to %s. (%d retries left).",
filename, retryct);
Delay(50);
}
}
if (!retryct) {
raw_printf("I give up. Sorry.");
nesting--;
return FALSE;
}
#endif /* AMIGA || WIN32 || MSDOS */
return TRUE;
}
#ifdef VMS /* for unlock_file, use the unlink() routine in vmsunix.c */
#ifdef unlink
#undef unlink
#endif
#define unlink(foo) vms_unlink(foo)
#endif
/* unlock file, which must be currently locked by lock_file */
void
unlock_file(filename)
const char *filename;
{
#ifndef USE_FCNTL
char locknambuf[BUFSZ];
const char *lockname;
#endif
if (nesting == 1) {
#ifdef USE_FCNTL
sflock.l_type = F_UNLCK;
if (lockfd >= 0) {
if (fcntl(lockfd, F_SETLK, &sflock) == -1)
HUP raw_printf("Can't remove fcntl lock on %s.", filename);
(void) close(lockfd), lockfd = -1;
}
#else
lockname = make_lockname(filename, locknambuf);
#ifndef NO_FILE_LINKS /* LOCKDIR should be subsumed by LOCKPREFIX */
lockname = fqname(lockname, LOCKPREFIX, 2);
#endif
#if defined(UNIX) || defined(VMS)
if (unlink(lockname) < 0)
HUP raw_printf("Can't unlink %s.", lockname);
#ifdef NO_FILE_LINKS
(void) nhclose(lockfd), lockfd = -1;
#endif
#endif /* UNIX || VMS */
#if defined(AMIGA) || defined(WIN32) || defined(MSDOS)
if (lockptr)
Close(lockptr);
DeleteFile(lockname);
lockptr = 0;
#endif /* AMIGA || WIN32 || MSDOS */
#endif /* USE_FCNTL */
}
nesting--;
}
/* ---------- END FILE LOCKING HANDLING ----------- */
/* ---------- BEGIN CONFIG FILE HANDLING ----------- */
const char *default_configfile =
#ifdef UNIX
".nethackrc";
#else
#if defined(MAC) || defined(__BEOS__)
"NetHack Defaults";
#else
#if defined(MSDOS) || defined(WIN32)
CONFIG_FILE;
#else
"NetHack.cnf";
#endif
#endif
#endif
/* used for messaging */
char configfile[BUFSZ];
#ifdef MSDOS
/* conflict with speed-dial under windows
* for XXX.cnf file so support of NetHack.cnf
* is for backward compatibility only.
* Preferred name (and first tried) is now defaults.nh but
* the game will try the old name if there
* is no defaults.nh.
*/
const char *backward_compat_configfile = "nethack.cnf";
#endif
/* remember the name of the file we're accessing;
if may be used in option reject messages */
STATIC_OVL void
set_configfile_name(fname)
const char *fname;
{
(void) strncpy(configfile, fname, sizeof configfile - 1);
configfile[sizeof configfile - 1] = '\0';
}
#ifndef MFLOPPY
#define fopenp fopen
#endif
STATIC_OVL FILE *
fopen_config_file(filename, src)
const char *filename;
int src;
{
FILE *fp;
#if defined(UNIX) || defined(VMS)
char tmp_config[BUFSZ];
char *envp;
#endif
if (src == SET_IN_SYS) {
/* SYSCF_FILE; if we can't open it, caller will bail */
if (filename && *filename) {
set_configfile_name(fqname(filename, SYSCONFPREFIX, 0));
fp = fopenp(configfile, "r");
} else
fp = (FILE *) 0;
return fp;
}
/* If src != SET_IN_SYS, "filename" is an environment variable, so it
* should hang around. If set, it is expected to be a full path name
* (if relevant)
*/
if (filename && *filename) {
set_configfile_name(filename);
#ifdef UNIX
if (access(configfile, 4) == -1) { /* 4 is R_OK on newer systems */
/* nasty sneaky attempt to read file through
* NetHack's setuid permissions -- this is the only
* place a file name may be wholly under the player's
* control (but SYSCF_FILE is not under the player's
* control so it's OK).
*/
raw_printf("Access to %s denied (%d).", configfile, errno);
wait_synch();
/* fall through to standard names */
} else
#endif
if ((fp = fopenp(configfile, "r")) != (FILE *) 0) {
return fp;
#if defined(UNIX) || defined(VMS)
} else {
/* access() above probably caught most problems for UNIX */
raw_printf("Couldn't open requested config file %s (%d).",
configfile, errno);
wait_synch();
#endif
}
}
/* fall through to standard names */
#if defined(MICRO) || defined(MAC) || defined(__BEOS__) || defined(WIN32)
set_configfile_name(fqname(default_configfile, CONFIGPREFIX, 0));
if ((fp = fopenp(configfile, "r")) != (FILE *) 0) {
return fp;
} else if (strcmp(default_configfile, configfile)) {
set_configfile_name(default_configfile);
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
}
#ifdef MSDOS
set_configfile_name(fqname(backward_compat_configfile, CONFIGPREFIX, 0));
if ((fp = fopenp(configfile, "r")) != (FILE *) 0) {
return fp;
} else if (strcmp(backward_compat_configfile, configfile)) {
set_configfile_name(backward_compat_configfile);
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
}
#endif
#else
/* constructed full path names don't need fqname() */
#ifdef VMS
/* no punctuation, so might be a logical name */
set_configfile_name("nethackini");
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
set_configfile_name("sys$login:nethack.ini");
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
envp = nh_getenv("HOME");
if (!envp || !*envp)
Strcpy(tmp_config, "NetHack.cnf");
else
Sprintf(tmp_config, "%s%s%s", envp,
!index(":]>/", envp[strlen(envp) - 1]) ? "/" : "",
"NetHack.cnf");
set_configfile_name(tmp_config);
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
#else /* should be only UNIX left */
envp = nh_getenv("HOME");
if (!envp)
Strcpy(tmp_config, ".nethackrc");
else
Sprintf(tmp_config, "%s/%s", envp, ".nethackrc");
set_configfile_name(tmp_config);
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
#if defined(__APPLE__) /* UNIX+__APPLE__ => MacOSX */
/* try an alternative */
if (envp) {
/* OSX-style configuration settings */
Sprintf(tmp_config, "%s/%s", envp,
"Library/Preferences/NetHack Defaults");
set_configfile_name(tmp_config);
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
/* may be easier for user to edit if filename has '.txt' suffix */
Sprintf(tmp_config, "%s/%s", envp,
"Library/Preferences/NetHack Defaults.txt");
set_configfile_name(tmp_config);
if ((fp = fopenp(configfile, "r")) != (FILE *) 0)
return fp;
}
#endif /*__APPLE__*/
if (errno != ENOENT) {
const char *details;
/* e.g., problems when setuid NetHack can't search home
directory restricted to user */
#if defined(NHSTDC) && !defined(NOTSTDC)
if ((details = strerror(errno)) == 0)
#endif
details = "";
raw_printf("Couldn't open default config file %s %s(%d).",
configfile, details, errno);
wait_synch();
}
#endif /* !VMS => Unix */
#endif /* !(MICRO || MAC || __BEOS__ || WIN32) */
return (FILE *) 0;
}
/*
* Retrieve a list of integers from buf into a uchar array.
*
* NOTE: zeros are inserted unless modlist is TRUE, in which case the list
* location is unchanged. Callers must handle zeros if modlist is FALSE.
*/
STATIC_OVL int
get_uchars(bufp, list, modlist, size, name)
char *bufp; /* current pointer */
uchar *list; /* return list */
boolean modlist; /* TRUE: list is being modified in place */
int size; /* return list size */
const char *name; /* name of option for error message */
{
unsigned int num = 0;
int count = 0;
boolean havenum = FALSE;
while (1) {
switch (*bufp) {
case ' ':
case '\0':
case '\t':
case '\n':
if (havenum) {
/* if modifying in place, don't insert zeros */
if (num || !modlist)
list[count] = num;
count++;
num = 0;
havenum = FALSE;
}
if (count == size || !*bufp)
return count;
bufp++;
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
havenum = TRUE;
num = num * 10 + (*bufp - '0');
bufp++;
break;
case '\\':
goto gi_error;
break;
default:
gi_error:
raw_printf("Syntax error in %s", name);
wait_synch();
return count;
}
}
/*NOTREACHED*/
}
#ifdef NOCWD_ASSUMPTIONS
STATIC_OVL void
adjust_prefix(bufp, prefixid)
char *bufp;
int prefixid;
{
char *ptr;
if (!bufp)
return;
#ifdef WIN32
if (fqn_prefix_locked[prefixid])
return;
#endif
/* Backward compatibility, ignore trailing ;n */
if ((ptr = index(bufp, ';')) != 0)
*ptr = '\0';
if (strlen(bufp) > 0) {
fqn_prefix[prefixid] = (char *) alloc(strlen(bufp) + 2);
Strcpy(fqn_prefix[prefixid], bufp);
append_slash(fqn_prefix[prefixid]);
}
}
#endif
/* Choose at random one of the sep separated parts from str. Mangles str. */
STATIC_OVL char *
choose_random_part(str,sep)
char *str;
char sep;
{
int nsep = 1;
int csep;
int len = 0;
char *begin = str;
if (!str)
return (char *) 0;
while (*str) {
if (*str == sep)
nsep++;
str++;
}
csep = rn2(nsep);
str = begin;
while ((csep > 0) && *str) {
str++;
if (*str == sep)
csep--;
}
if (*str) {
if (*str == sep)
str++;
begin = str;
while (*str && *str != sep) {
str++;
len++;
}
*str = '\0';
if (len)
return begin;
}
return (char *) 0;
}
STATIC_OVL void
free_config_sections()
{
if (config_section_chosen) {
free(config_section_chosen);
config_section_chosen = NULL;
}
if (config_section_current) {
free(config_section_current);
config_section_current = NULL;
}
}
STATIC_OVL boolean
is_config_section(str)
const char *str;
{
const char *a = rindex(str, ']');
return (a && *str == '[' && *(a+1) == '\0' && (int)(a - str) > 0);
}
STATIC_OVL boolean
handle_config_section(buf)
char *buf;
{
if (is_config_section(buf)) {
char *send;
if (config_section_current) {
free(config_section_current);
}
config_section_current = dupstr(&buf[1]);
send = rindex(config_section_current, ']');
*send = '\0';
debugpline1("set config section: '%s'", config_section_current);
return TRUE;
}
if (config_section_current) {
if (!config_section_chosen)
return TRUE;
if (strcmp(config_section_current, config_section_chosen))
return TRUE;
}
return FALSE;
}
#define match_varname(INP, NAM, LEN) match_optname(INP, NAM, LEN, TRUE)
/* find the '=' or ':' */
char *
find_optparam(buf)
const char *buf;
{
char *bufp, *altp;
bufp = index(buf, '=');
altp = index(buf, ':');
if (!bufp || (altp && altp < bufp))
bufp = altp;
return bufp;
}
boolean
parse_config_line(origbuf)
char *origbuf;
{
#if defined(MICRO) && !defined(NOCWD_ASSUMPTIONS)
static boolean ramdisk_specified = FALSE;
#endif
#ifdef SYSCF
int n, src = iflags.parse_config_file_src;
#endif
char *bufp, buf[4 * BUFSZ];
uchar translate[MAXPCHARS];
int len;
boolean retval = TRUE;
while (*origbuf == ' ' || *origbuf == '\t') /* skip leading whitespace */
++origbuf; /* (caller probably already did this) */
(void) strncpy(buf, origbuf, sizeof buf - 1);
buf[sizeof buf - 1] = '\0'; /* strncpy not guaranteed to NUL terminate */
/* convert any tab to space, condense consecutive spaces into one,
remove leading and trailing spaces (exception: if there is nothing
but spaces, one of them will be kept even though it leads/trails) */
mungspaces(buf);
/* find the '=' or ':' */
bufp = find_optparam(buf);
if (!bufp) {
config_error_add("Not a config statement, missing '='");
return FALSE;
}
/* skip past '=', then space between it and value, if any */
++bufp;
if (*bufp == ' ')
++bufp;
/* Go through possible variables */
/* some of these (at least LEVELS and SAVE) should now set the
* appropriate fqn_prefix[] rather than specialized variables
*/
if (match_varname(buf, "OPTIONS", 4)) {
/* hack: un-mungspaces to allow consecutive spaces in
general options until we verify that this is unnecessary;
'=' or ':' is guaranteed to be present */
bufp = find_optparam(origbuf);
++bufp; /* skip '='; parseoptions() handles spaces */
if (!parseoptions(bufp, TRUE, TRUE))
retval = FALSE;
} else if (match_varname(buf, "AUTOPICKUP_EXCEPTION", 5)) {
add_autopickup_exception(bufp);
} else if (match_varname(buf, "BINDINGS", 4)) {
if (!parsebindings(bufp))
retval = FALSE;
} else if (match_varname(buf, "AUTOCOMPLETE", 5)) {
parseautocomplete(bufp, TRUE);
} else if (match_varname(buf, "MSGTYPE", 7)) {
if (!msgtype_parse_add(bufp))
retval = FALSE;
#ifdef NOCWD_ASSUMPTIONS
} else if (match_varname(buf, "HACKDIR", 4)) {
adjust_prefix(bufp, HACKPREFIX);
} else if (match_varname(buf, "LEVELDIR", 4)
|| match_varname(buf, "LEVELS", 4)) {
adjust_prefix(bufp, LEVELPREFIX);
} else if (match_varname(buf, "SAVEDIR", 4)) {
adjust_prefix(bufp, SAVEPREFIX);
} else if (match_varname(buf, "BONESDIR", 5)) {
adjust_prefix(bufp, BONESPREFIX);
} else if (match_varname(buf, "DATADIR", 4)) {
adjust_prefix(bufp, DATAPREFIX);
} else if (match_varname(buf, "SCOREDIR", 4)) {
adjust_prefix(bufp, SCOREPREFIX);
} else if (match_varname(buf, "LOCKDIR", 4)) {
adjust_prefix(bufp, LOCKPREFIX);
} else if (match_varname(buf, "CONFIGDIR", 4)) {
adjust_prefix(bufp, CONFIGPREFIX);
} else if (match_varname(buf, "TROUBLEDIR", 4)) {
adjust_prefix(bufp, TROUBLEPREFIX);
#else /*NOCWD_ASSUMPTIONS*/
#ifdef MICRO
} else if (match_varname(buf, "HACKDIR", 4)) {
(void) strncpy(hackdir, bufp, PATHLEN - 1);
#ifdef MFLOPPY
} else if (match_varname(buf, "RAMDISK", 3)) {
/* The following ifdef is NOT in the wrong
* place. For now, we accept and silently
* ignore RAMDISK */
#ifndef AMIGA
if (strlen(bufp) >= PATHLEN)
bufp[PATHLEN - 1] = '\0';
Strcpy(levels, bufp);
ramdisk = (strcmp(permbones, levels) != 0);
ramdisk_specified = TRUE;
#endif
#endif
} else if (match_varname(buf, "LEVELS", 4)) {
if (strlen(bufp) >= PATHLEN)
bufp[PATHLEN - 1] = '\0';
Strcpy(permbones, bufp);
if (!ramdisk_specified || !*levels)
Strcpy(levels, bufp);
ramdisk = (strcmp(permbones, levels) != 0);
} else if (match_varname(buf, "SAVE", 4)) {
#ifdef MFLOPPY
extern int saveprompt;
#endif
char *ptr;
if ((ptr = index(bufp, ';')) != 0) {
*ptr = '\0';
#ifdef MFLOPPY
if (*(ptr + 1) == 'n' || *(ptr + 1) == 'N') {
saveprompt = FALSE;
}
#endif
}
#if defined(SYSFLAGS) && defined(MFLOPPY)
else
saveprompt = sysflags.asksavedisk;
#endif
(void) strncpy(SAVEP, bufp, SAVESIZE - 1);
append_slash(SAVEP);
#endif /* MICRO */
#endif /*NOCWD_ASSUMPTIONS*/
} else if (match_varname(buf, "NAME", 4)) {
(void) strncpy(plname, bufp, PL_NSIZ - 1);
} else if (match_varname(buf, "ROLE", 4)
|| match_varname(buf, "CHARACTER", 4)) {
if ((len = str2role(bufp)) >= 0)
flags.initrole = len;
} else if (match_varname(buf, "DOGNAME", 3)) {
(void) strncpy(dogname, bufp, PL_PSIZ - 1);
} else if (match_varname(buf, "CATNAME", 3)) {
(void) strncpy(catname, bufp, PL_PSIZ - 1);
#ifdef SYSCF
} else if (src == SET_IN_SYS && match_varname(buf, "WIZARDS", 7)) {
if (sysopt.wizards)
free((genericptr_t) sysopt.wizards);
sysopt.wizards = dupstr(bufp);
if (strlen(sysopt.wizards) && strcmp(sysopt.wizards, "*")) {
/* pre-format WIZARDS list now; it's displayed during a panic
and since that panic might be due to running out of memory,
we don't want to risk attempting to allocate any memory then */
if (sysopt.fmtd_wizard_list)
free((genericptr_t) sysopt.fmtd_wizard_list);
sysopt.fmtd_wizard_list = build_english_list(sysopt.wizards);
}
} else if (src == SET_IN_SYS && match_varname(buf, "SHELLERS", 8)) {
if (sysopt.shellers)
free((genericptr_t) sysopt.shellers);
sysopt.shellers = dupstr(bufp);
} else if (src == SET_IN_SYS && match_varname(buf, "EXPLORERS", 7)) {
if (sysopt.explorers)
free((genericptr_t) sysopt.explorers);
sysopt.explorers = dupstr(bufp);
} else if (src == SET_IN_SYS && match_varname(buf, "DEBUGFILES", 5)) {
/* if showdebug() has already been called (perhaps we've added
some debugpline() calls to option processing) and has found
a value for getenv("DEBUGFILES"), don't override that */
if (sysopt.env_dbgfl <= 0) {
if (sysopt.debugfiles)
free((genericptr_t) sysopt.debugfiles);
sysopt.debugfiles = dupstr(bufp);
}
} else if (src == SET_IN_SYS && match_varname(buf, "DUMPLOGFILE", 7)) {
#ifdef DUMPLOG
if (sysopt.dumplogfile)
free((genericptr_t) sysopt.dumplogfile);
sysopt.dumplogfile = dupstr(bufp);
#endif
#ifdef WIN32
} else if (src == SET_IN_SYS && match_varname(buf, "portable_device_top", 8)) {
if (sysopt.portable_device_top)
free((genericptr_t) sysopt.portable_device_top);
sysopt.portable_device_top = dupstr(bufp);
#endif
} else if (src == SET_IN_SYS && match_varname(buf, "GENERICUSERS", 12)) {
if (sysopt.genericusers)
free((genericptr_t) sysopt.genericusers);
sysopt.genericusers = dupstr(bufp);
} else if (src == SET_IN_SYS && match_varname(buf, "BONES_POOLS", 10)) {
/* max value of 10 guarantees (N % bones.pools) will be one digit
so we don't lose control of the length of bones file names */
n = atoi(bufp);
sysopt.bones_pools = (n <= 0) ? 0 : min(n, 10);
/* note: right now bones_pools==0 is the same as bones_pools==1,
but we could change that and make bones_pools==0 become an
indicator to suppress bones usage altogether */
} else if (src == SET_IN_SYS && match_varname(buf, "SUPPORT", 7)) {
if (sysopt.support)
free((genericptr_t) sysopt.support);
sysopt.support = dupstr(bufp);
} else if (src == SET_IN_SYS && match_varname(buf, "RECOVER", 7)) {
if (sysopt.recover)
free((genericptr_t) sysopt.recover);
sysopt.recover = dupstr(bufp);
} else if (src == SET_IN_SYS
&& match_varname(buf, "CHECK_SAVE_UID", 14)) {
n = atoi(bufp);
sysopt.check_save_uid = n;
} else if (src == SET_IN_SYS
&& match_varname(buf, "CHECK_PLNAME", 12)) {
n = atoi(bufp);
sysopt.check_plname = n;
} else if (match_varname(buf, "SEDUCE", 6)) {
n = !!atoi(bufp); /* XXX this could be tighter */
/* allow anyone to turn it off, but only sysconf to turn it on*/
if (src != SET_IN_SYS && n != 0) {
config_error_add("Illegal value in SEDUCE");
return FALSE;
}
sysopt.seduce = n;
sysopt_seduce_set(sysopt.seduce);
} else if (src == SET_IN_SYS && match_varname(buf, "MAXPLAYERS", 10)) {
n = atoi(bufp);
/* XXX to get more than 25, need to rewrite all lock code */
if (n < 0 || n > 25) {
config_error_add("Illegal value in MAXPLAYERS (maximum is 25).");
return FALSE;
}
sysopt.maxplayers = n;
} else if (src == SET_IN_SYS && match_varname(buf, "PERSMAX", 7)) {
n = atoi(bufp);
if (n < 1) {
config_error_add("Illegal value in PERSMAX (minimum is 1).");
return FALSE;
}
sysopt.persmax = n;
} else if (src == SET_IN_SYS && match_varname(buf, "PERS_IS_UID", 11)) {
n = atoi(bufp);
if (n != 0 && n != 1) {
config_error_add("Illegal value in PERS_IS_UID (must be 0 or 1).");
return FALSE;
}
sysopt.pers_is_uid = n;
} else if (src == SET_IN_SYS && match_varname(buf, "ENTRYMAX", 8)) {
n = atoi(bufp);
if (n < 10) {
config_error_add("Illegal value in ENTRYMAX (minimum is 10).");
return FALSE;
}
sysopt.entrymax = n;
} else if ((src == SET_IN_SYS) && match_varname(buf, "POINTSMIN", 9)) {
n = atoi(bufp);
if (n < 1) {
config_error_add("Illegal value in POINTSMIN (minimum is 1).");
return FALSE;
}
sysopt.pointsmin = n;
} else if (src == SET_IN_SYS
&& match_varname(buf, "MAX_STATUENAME_RANK", 10)) {
n = atoi(bufp);
if (n < 1) {
config_error_add(
"Illegal value in MAX_STATUENAME_RANK (minimum is 1).");
return FALSE;
}
sysopt.tt_oname_maxrank = n;
/* SYSCF PANICTRACE options */
} else if (src == SET_IN_SYS
&& match_varname(buf, "PANICTRACE_LIBC", 15)) {
n = atoi(bufp);
#if defined(PANICTRACE) && defined(PANICTRACE_LIBC)
if (n < 0 || n > 2) {
config_error_add("Illegal value in PANICTRACE_LIBC (not 0,1,2).");
return FALSE;
}
#endif
sysopt.panictrace_libc = n;
} else if (src == SET_IN_SYS
&& match_varname(buf, "PANICTRACE_GDB", 14)) {
n = atoi(bufp);
#if defined(PANICTRACE)
if (n < 0 || n > 2) {
config_error_add("Illegal value in PANICTRACE_GDB (not 0,1,2).");
return FALSE;
}
#endif
sysopt.panictrace_gdb = n;
} else if (src == SET_IN_SYS && match_varname(buf, "GDBPATH", 7)) {
#if defined(PANICTRACE) && !defined(VMS)
if (!file_exists(bufp)) {
config_error_add("File specified in GDBPATH does not exist.");
return FALSE;
}
#endif
if (sysopt.gdbpath)
free((genericptr_t) sysopt.gdbpath);
sysopt.gdbpath = dupstr(bufp);
} else if (src == SET_IN_SYS && match_varname(buf, "GREPPATH", 7)) {
#if defined(PANICTRACE) && !defined(VMS)
if (!file_exists(bufp)) {
config_error_add("File specified in GREPPATH does not exist.");
return FALSE;
}
#endif
if (sysopt.greppath)
free((genericptr_t) sysopt.greppath);
sysopt.greppath = dupstr(bufp);
} else if (src == SET_IN_SYS
&& match_varname(buf, "ACCESSIBILITY", 13)) {
n = atoi(bufp);
if (n < 0 || n > 1) {
config_error_add("Illegal value in ACCESSIBILITY (not 0,1).");
return FALSE;
}
sysopt.accessibility = n;
#endif /* SYSCF */
} else if (match_varname(buf, "BOULDER", 3)) {
(void) get_uchars(bufp, &ov_primary_syms[SYM_BOULDER + SYM_OFF_X],
TRUE, 1, "BOULDER");
} else if (match_varname(buf, "MENUCOLOR", 9)) {
if (!add_menu_coloring(bufp))
retval = FALSE;
} else if (match_varname(buf, "HILITE_STATUS", 6)) {
#ifdef STATUS_HILITES
if (!parse_status_hl1(bufp, TRUE))
retval = FALSE;
#endif
} else if (match_varname(buf, "WARNINGS", 5)) {
(void) get_uchars(bufp, translate, FALSE, WARNCOUNT,
"WARNINGS");
assign_warnings(translate);
} else if (match_varname(buf, "ROGUESYMBOLS", 4)) {
if (!parsesymbols(bufp, ROGUESET)) {
config_error_add("Error in ROGUESYMBOLS definition '%s'", bufp);
retval = FALSE;
}
switch_symbols(TRUE);
} else if (match_varname(buf, "SYMBOLS", 4)) {
if (!parsesymbols(bufp, PRIMARY)) {
config_error_add("Error in SYMBOLS definition '%s'", bufp);
retval = FALSE;
}
switch_symbols(TRUE);
} else if (match_varname(buf, "WIZKIT", 6)) {
(void) strncpy(wizkit, bufp, WIZKIT_MAX - 1);
#ifdef AMIGA
} else if (match_varname(buf, "FONT", 4)) {
char *t;
if (t = strchr(buf + 5, ':')) {
*t = 0;
amii_set_text_font(buf + 5, atoi(t + 1));
*t = ':';
}
} else if (match_varname(buf, "PATH", 4)) {
(void) strncpy(PATH, bufp, PATHLEN - 1);
} else if (match_varname(buf, "DEPTH", 5)) {
extern int amii_numcolors;
int val = atoi(bufp);
amii_numcolors = 1L << min(DEPTH, val);
#ifdef SYSFLAGS
} else if (match_varname(buf, "DRIPENS", 7)) {
int i, val;
char *t;
for (i = 0, t = strtok(bufp, ",/"); t != (char *) 0;
i < 20 && (t = strtok((char *) 0, ",/")), ++i) {
sscanf(t, "%d", &val);
sysflags.amii_dripens[i] = val;
}
#endif
} else if (match_varname(buf, "SCREENMODE", 10)) {
extern long amii_scrnmode;
if (!stricmp(bufp, "req"))
amii_scrnmode = 0xffffffff; /* Requester */
else if (sscanf(bufp, "%x", &amii_scrnmode) != 1)
amii_scrnmode = 0;
} else if (match_varname(buf, "MSGPENS", 7)) {
extern int amii_msgAPen, amii_msgBPen;
char *t = strtok(bufp, ",/");
if (t) {
sscanf(t, "%d", &amii_msgAPen);
if (t = strtok((char *) 0, ",/"))
sscanf(t, "%d", &amii_msgBPen);
}
} else if (match_varname(buf, "TEXTPENS", 8)) {
extern int amii_textAPen, amii_textBPen;
char *t = strtok(bufp, ",/");
if (t) {
sscanf(t, "%d", &amii_textAPen);
if (t = strtok((char *) 0, ",/"))
sscanf(t, "%d", &amii_textBPen);
}
} else if (match_varname(buf, "MENUPENS", 8)) {
extern int amii_menuAPen, amii_menuBPen;
char *t = strtok(bufp, ",/");
if (t) {
sscanf(t, "%d", &amii_menuAPen);
if (t = strtok((char *) 0, ",/"))
sscanf(t, "%d", &amii_menuBPen);
}
} else if (match_varname(buf, "STATUSPENS", 10)) {
extern int amii_statAPen, amii_statBPen;
char *t = strtok(bufp, ",/");
if (t) {
sscanf(t, "%d", &amii_statAPen);
if (t = strtok((char *) 0, ",/"))
sscanf(t, "%d", &amii_statBPen);
}
} else if (match_varname(buf, "OTHERPENS", 9)) {
extern int amii_otherAPen, amii_otherBPen;
char *t = strtok(bufp, ",/");
if (t) {
sscanf(t, "%d", &amii_otherAPen);
if (t = strtok((char *) 0, ",/"))
sscanf(t, "%d", &amii_otherBPen);
}
} else if (match_varname(buf, "PENS", 4)) {
extern unsigned short amii_init_map[AMII_MAXCOLORS];
int i;
char *t;
for (i = 0, t = strtok(bufp, ",/");
i < AMII_MAXCOLORS && t != (char *) 0;
t = strtok((char *) 0, ",/"), ++i) {
sscanf(t, "%hx", &amii_init_map[i]);
}
amii_setpens(amii_numcolors = i);
} else if (match_varname(buf, "FGPENS", 6)) {
extern int foreg[AMII_MAXCOLORS];
int i;
char *t;
for (i = 0, t = strtok(bufp, ",/");
i < AMII_MAXCOLORS && t != (char *) 0;
t = strtok((char *) 0, ",/"), ++i) {
sscanf(t, "%d", &foreg[i]);
}
} else if (match_varname(buf, "BGPENS", 6)) {
extern int backg[AMII_MAXCOLORS];
int i;
char *t;
for (i = 0, t = strtok(bufp, ",/");
i < AMII_MAXCOLORS && t != (char *) 0;
t = strtok((char *) 0, ",/"), ++i) {
sscanf(t, "%d", &backg[i]);
}
#endif /*AMIGA*/
#ifdef USER_SOUNDS
} else if (match_varname(buf, "SOUNDDIR", 8)) {
sounddir = dupstr(bufp);
} else if (match_varname(buf, "SOUND", 5)) {
add_sound_mapping(bufp);
#endif
} else if (match_varname(buf, "QT_TILEWIDTH", 12)) {
#ifdef QT_GRAPHICS
extern char *qt_tilewidth;
if (qt_tilewidth == NULL)
qt_tilewidth = dupstr(bufp);
#endif
} else if (match_varname(buf, "QT_TILEHEIGHT", 13)) {
#ifdef QT_GRAPHICS
extern char *qt_tileheight;
if (qt_tileheight == NULL)
qt_tileheight = dupstr(bufp);
#endif
} else if (match_varname(buf, "QT_FONTSIZE", 11)) {
#ifdef QT_GRAPHICS
extern char *qt_fontsize;
if (qt_fontsize == NULL)
qt_fontsize = dupstr(bufp);
#endif
} else if (match_varname(buf, "QT_COMPACT", 10)) {
#ifdef QT_GRAPHICS
extern int qt_compact_mode;
qt_compact_mode = atoi(bufp);
#endif
} else {
config_error_add("Unknown config statement");
return FALSE;
}
return retval;
}
#ifdef USER_SOUNDS
boolean
can_read_file(filename)
const char *filename;
{
return (boolean) (access(filename, 4) == 0);
}
#endif /* USER_SOUNDS */
struct _config_error_frame {
int line_num;
int num_errors;
boolean origline_shown;
boolean fromfile;
boolean secure;
char origline[4 * BUFSZ];
char source[BUFSZ];
struct _config_error_frame *next;
};
static struct _config_error_frame *config_error_data = 0;
void
config_error_init(from_file, sourcename, secure)
boolean from_file;
const char *sourcename;
boolean secure;
{
struct _config_error_frame *tmp = (struct _config_error_frame *)
alloc(sizeof (struct _config_error_frame));
tmp->line_num = 0;
tmp->num_errors = 0;
tmp->origline_shown = FALSE;
tmp->fromfile = from_file;
tmp->secure = secure;
tmp->origline[0] = '\0';
if (sourcename && sourcename[0]) {
(void) strncpy(tmp->source, sourcename, sizeof (tmp->source) - 1);
tmp->source[sizeof (tmp->source) - 1] = '\0';
} else
tmp->source[0] = '\0';
tmp->next = config_error_data;
config_error_data = tmp;
}
STATIC_OVL boolean
config_error_nextline(line)
const char *line;
{
struct _config_error_frame *ced = config_error_data;
if (!ced)
return FALSE;
if (ced->num_errors && ced->secure)
return FALSE;
ced->line_num++;
ced->origline_shown = FALSE;
if (line && line[0]) {
(void) strncpy(ced->origline, line, sizeof (ced->origline) - 1);
ced->origline[sizeof (ced->origline) - 1] = '\0';
} else
ced->origline[0] = '\0';
return TRUE;
}
/* varargs 'config_error_add()' moved to pline.c */
void
config_erradd(buf)
const char *buf;
{
char lineno[QBUFSZ];
if (!buf || !*buf)
buf = "Unknown error";
if (!config_error_data) {
/* either very early, where pline() will use raw_print(), or
player gave bad value when prompted by interactive 'O' command */
pline("%s%s.", !iflags.window_inited ? "config_error_add: " : "", buf);
wait_synch();
return;
}
config_error_data->num_errors++;
if (!config_error_data->origline_shown && !config_error_data->secure) {
pline("\n%s", config_error_data->origline);
config_error_data->origline_shown = TRUE;
}
if (config_error_data->line_num > 0 && !config_error_data->secure) {
Sprintf(lineno, "Line %d: ", config_error_data->line_num);
} else
lineno[0] = '\0';
pline("%s %s%s.", config_error_data->secure ? "Error:" : " *",
lineno, buf);
}
int
config_error_done()
{
int n;
struct _config_error_frame *tmp = config_error_data;
if (!config_error_data)
return 0;
n = config_error_data->num_errors;
if (n) {
pline("\n%d error%s in %s.\n", n,
(n > 1) ? "s" : "",
*config_error_data->source
? config_error_data->source : configfile);
wait_synch();
}
config_error_data = tmp->next;
free(tmp);
return n;
}
boolean
read_config_file(filename, src)
const char *filename;
int src;
{
FILE *fp;
boolean rv = TRUE;
if (!(fp = fopen_config_file(filename, src)))
return FALSE;
/* begin detection of duplicate configfile options */
set_duplicate_opt_detection(1);
free_config_sections();
iflags.parse_config_file_src = src;
rv = parse_conf_file(fp, parse_config_line);
(void) fclose(fp);
free_config_sections();
/* turn off detection of duplicate configfile options */
set_duplicate_opt_detection(0);
return rv;
}
STATIC_OVL FILE *
fopen_wizkit_file()
{
FILE *fp;
#if defined(VMS) || defined(UNIX)
char tmp_wizkit[BUFSZ];
#endif
char *envp;
envp = nh_getenv("WIZKIT");
if (envp && *envp)
(void) strncpy(wizkit, envp, WIZKIT_MAX - 1);
if (!wizkit[0])
return (FILE *) 0;
#ifdef UNIX
if (access(wizkit, 4) == -1) {
/* 4 is R_OK on newer systems */
/* nasty sneaky attempt to read file through
* NetHack's setuid permissions -- this is a
* place a file name may be wholly under the player's
* control
*/
raw_printf("Access to %s denied (%d).", wizkit, errno);
wait_synch();
/* fall through to standard names */
} else
#endif
if ((fp = fopenp(wizkit, "r")) != (FILE *) 0) {
return fp;
#if defined(UNIX) || defined(VMS)
} else {
/* access() above probably caught most problems for UNIX */
raw_printf("Couldn't open requested config file %s (%d).", wizkit,
errno);
wait_synch();
#endif
}
#if defined(MICRO) || defined(MAC) || defined(__BEOS__) || defined(WIN32)
if ((fp = fopenp(fqname(wizkit, CONFIGPREFIX, 0), "r")) != (FILE *) 0)
return fp;
#else
#ifdef VMS
envp = nh_getenv("HOME");
if (envp)
Sprintf(tmp_wizkit, "%s%s", envp, wizkit);
else
Sprintf(tmp_wizkit, "%s%s", "sys$login:", wizkit);
if ((fp = fopenp(tmp_wizkit, "r")) != (FILE *) 0)
return fp;
#else /* should be only UNIX left */
envp = nh_getenv("HOME");
if (envp)
Sprintf(tmp_wizkit, "%s/%s", envp, wizkit);
else
Strcpy(tmp_wizkit, wizkit);
if ((fp = fopenp(tmp_wizkit, "r")) != (FILE *) 0)
return fp;
else if (errno != ENOENT) {
/* e.g., problems when setuid NetHack can't search home
* directory restricted to user */
raw_printf("Couldn't open default wizkit file %s (%d).", tmp_wizkit,
errno);
wait_synch();
}
#endif
#endif
return (FILE *) 0;
}
/* add to hero's inventory if there's room, otherwise put item on floor */
STATIC_DCL void
wizkit_addinv(obj)
struct obj *obj;
{
if (!obj || obj == &zeroobj)
return;
/* subset of starting inventory pre-ID */
obj->dknown = 1;
if (Role_if(PM_PRIEST))
obj->bknown = 1; /* ok to bypass set_bknown() */
/* same criteria as lift_object()'s check for available inventory slot */
if (obj->oclass != COIN_CLASS && inv_cnt(FALSE) >= 52
&& !merge_choice(invent, obj)) {
/* inventory overflow; can't just place & stack object since
hero isn't in position yet, so schedule for arrival later */
add_to_migration(obj);
obj->ox = 0; /* index of main dungeon */
obj->oy = 1; /* starting level number */
obj->owornmask =
(long) (MIGR_WITH_HERO | MIGR_NOBREAK | MIGR_NOSCATTER);
} else {
(void) addinv(obj);
}
}
boolean
proc_wizkit_line(buf)
char *buf;
{
struct obj *otmp;
if (strlen(buf) >= BUFSZ)
buf[BUFSZ - 1] = '\0';
otmp = readobjnam(buf, (struct obj *) 0);
if (otmp) {
if (otmp != &zeroobj)
wizkit_addinv(otmp);
} else {
/* .60 limits output line width to 79 chars */
config_error_add("Bad wizkit item: \"%.60s\"", buf);
return FALSE;
}
return TRUE;
}
void
read_wizkit()
{
FILE *fp;
if (!wizard || !(fp = fopen_wizkit_file()))
return;
program_state.wizkit_wishing = 1;
config_error_init(TRUE, "WIZKIT", FALSE);
parse_conf_file(fp, proc_wizkit_line);
(void) fclose(fp);
config_error_done();
program_state.wizkit_wishing = 0;
return;
}
/* parse_conf_file
*
* Read from file fp, handling comments, empty lines, config sections,
* CHOOSE, and line continuation, calling proc for every valid line.
*
* Continued lines are merged together with one space in between.
*/
STATIC_OVL boolean
parse_conf_file(fp, proc)
FILE *fp;
boolean FDECL((*proc), (char *));
{
char inbuf[4 * BUFSZ];
boolean rv = TRUE; /* assume successful parse */
char *ep;
boolean skip = FALSE, morelines = FALSE;
char *buf = (char *) 0;
size_t inbufsz = sizeof inbuf;
free_config_sections();
while (fgets(inbuf, (int) inbufsz, fp)) {
ep = index(inbuf, '\n');
if (skip) { /* in case previous line was too long */
if (ep)
skip = FALSE; /* found newline; next line is normal */
} else {
if (!ep) { /* newline missing */
if (strlen(inbuf) < (inbufsz - 2)) {
/* likely the last line of file is just
missing a newline; process it anyway */
ep = eos(inbuf);
} else {
config_error_add("Line too long, skipping");
skip = TRUE; /* discard next fgets */
}
} else {
*ep = '\0'; /* remove newline */
}
if (ep) {
char *tmpbuf = (char *) 0;
int len;
boolean ignoreline = FALSE;
boolean oldline = FALSE;
/* line continuation (trailing '\') */
morelines = (--ep >= inbuf && *ep == '\\');
if (morelines)
*ep = '\0';
/* trim off spaces at end of line */
while (ep >= inbuf
&& (*ep == ' ' || *ep == '\t' || *ep == '\r'))
*ep-- = '\0';
if (!config_error_nextline(inbuf)) {
rv = FALSE;
if (buf)
free(buf), buf = (char *) 0;
break;
}
ep = inbuf;
while (*ep == ' ' || *ep == '\t')
++ep;
/* ignore empty lines and full-line comment lines */
if (!*ep || *ep == '#')
ignoreline = TRUE;
if (buf)
oldline = TRUE;
/* merge now read line with previous ones, if necessary */
if (!ignoreline) {
len = (int) strlen(ep) + 1; /* +1: final '\0' */
if (buf)
len += (int) strlen(buf) + 1; /* +1: space */
tmpbuf = (char *) alloc(len);
*tmpbuf = '\0';
if (buf) {
Strcat(strcpy(tmpbuf, buf), " ");
free(buf);
}
buf = strcat(tmpbuf, ep);
if (strlen(buf) >= sizeof inbuf)
buf[sizeof inbuf - 1] = '\0';
}
if (morelines || (ignoreline && !oldline))
continue;
if (handle_config_section(buf)) {
free(buf);
buf = (char *) 0;
continue;
}
/* from here onwards, we'll handle buf only */
if (match_varname(buf, "CHOOSE", 6)) {
char *section;
char *bufp = find_optparam(buf);
if (!bufp) {
config_error_add(
"Format is CHOOSE=section1,section2,...");
rv = FALSE;
free(buf);
buf = (char *) 0;
continue;
}
bufp++;
if (config_section_chosen)
free(config_section_chosen), config_section_chosen = 0;
section = choose_random_part(bufp, ',');
if (section) {
config_section_chosen = dupstr(section);
} else {
config_error_add("No config section to choose");
rv = FALSE;
}
free(buf);
buf = (char *) 0;
continue;
}
if (!proc(buf))
rv = FALSE;
free(buf);
buf = (char *) 0;
}
}
}
if (buf)
free(buf);
free_config_sections();
return rv;
}
extern struct symsetentry *symset_list; /* options.c */
extern const char *known_handling[]; /* drawing.c */
extern const char *known_restrictions[]; /* drawing.c */
static int symset_count = 0; /* for pick-list building only */
static boolean chosen_symset_start = FALSE, chosen_symset_end = FALSE;
static int symset_which_set = 0;
STATIC_OVL
FILE *
fopen_sym_file()
{
FILE *fp;
fp = fopen_datafile(SYMBOLS, "r",
#ifdef WIN32
SYSCONFPREFIX
#else
HACKPREFIX
#endif
);
return fp;
}
/*
* Returns 1 if the chose symset was found and loaded.
* 0 if it wasn't found in the sym file or other problem.
*/
int
read_sym_file(which_set)
int which_set;
{
FILE *fp;
symset[which_set].explicitly = FALSE;
if (!(fp = fopen_sym_file()))
return 0;
symset[which_set].explicitly = TRUE;
symset_count = 0;
chosen_symset_start = chosen_symset_end = FALSE;
symset_which_set = which_set;
config_error_init(TRUE, "symbols", FALSE);
parse_conf_file(fp, proc_symset_line);
(void) fclose(fp);
if (!chosen_symset_start && !chosen_symset_end) {
/* name caller put in symset[which_set].name was not found;
if it looks like "Default symbols", null it out and return
success to use the default; otherwise, return failure */
if (symset[which_set].name
&& (fuzzymatch(symset[which_set].name, "Default symbols",
" -_", TRUE)
|| !strcmpi(symset[which_set].name, "default")))
clear_symsetentry(which_set, TRUE);
config_error_done();
/* If name was defined, it was invalid... Then we're loading fallback */
if (symset[which_set].name) {
symset[which_set].explicitly = FALSE;
return 0;
}
return 1;
}
if (!chosen_symset_end)
config_error_add("Missing finish for symset \"%s\"",
symset[which_set].name ? symset[which_set].name
: "unknown");
config_error_done();
return 1;
}
boolean
proc_symset_line(buf)
char *buf;
{
return !((boolean) parse_sym_line(buf, symset_which_set));
}
/* returns 0 on error */
int
parse_sym_line(buf, which_set)
char *buf;
int which_set;
{
int val, i;
struct symparse *symp;
char *bufp, *commentp, *altp;
if (strlen(buf) >= BUFSZ)
buf[BUFSZ - 1] = '\0';
/* convert each instance of whitespace (tabs, consecutive spaces)
into a single space; leading and trailing spaces are stripped */
mungspaces(buf);
/* remove trailing comment, if any (this isn't strictly needed for
individual symbols, and it won't matter if "X#comment" without
separating space slips through; for handling or set description,
symbol set creator is responsible for preceding '#' with a space
and that comment itself doesn't contain " #") */
if ((commentp = rindex(buf, '#')) != 0 && commentp[-1] == ' ')
commentp[-1] = '\0';
/* find the '=' or ':' */
bufp = index(buf, '=');
altp = index(buf, ':');
if (!bufp || (altp && altp < bufp))
bufp = altp;
if (!bufp) {
if (strncmpi(buf, "finish", 6) == 0) {
/* end current graphics set */
if (chosen_symset_start)
chosen_symset_end = TRUE;
chosen_symset_start = FALSE;
return 1;
}
config_error_add("No \"finish\"");
return 0;
}
/* skip '=' and space which follows, if any */
++bufp;
if (*bufp == ' ')
++bufp;
symp = match_sym(buf);
if (!symp) {
config_error_add("Unknown sym keyword");
return 0;
}
if (!symset[which_set].name) {
/* A null symset name indicates that we're just
building a pick-list of possible symset
values from the file, so only do that */
if (symp->range == SYM_CONTROL) {
struct symsetentry *tmpsp, *lastsp;
for (lastsp = symset_list; lastsp; lastsp = lastsp->next)
if (!lastsp->next)
break;
switch (symp->idx) {
case 0:
tmpsp = (struct symsetentry *) alloc(sizeof *tmpsp);
tmpsp->next = (struct symsetentry *) 0;
if (!lastsp)
symset_list = tmpsp;
else
lastsp->next = tmpsp;
tmpsp->idx = symset_count++;
tmpsp->name = dupstr(bufp);
tmpsp->desc = (char *) 0;
tmpsp->handling = H_UNK;
/* initialize restriction bits */
tmpsp->nocolor = 0;
tmpsp->primary = 0;
tmpsp->rogue = 0;
break;
case 2:
/* handler type identified */
tmpsp = lastsp; /* most recent symset */
for (i = 0; known_handling[i]; ++i)
if (!strcmpi(known_handling[i], bufp)) {
tmpsp->handling = i;
break; /* for loop */
}
break;
case 3:
/* description:something */
tmpsp = lastsp; /* most recent symset */
if (tmpsp && !tmpsp->desc)
tmpsp->desc = dupstr(bufp);
break;
case 5:
/* restrictions: xxxx*/
tmpsp = lastsp; /* most recent symset */
for (i = 0; known_restrictions[i]; ++i) {
if (!strcmpi(known_restrictions[i], bufp)) {
switch (i) {
case 0:
tmpsp->primary = 1;
break;
case 1:
tmpsp->rogue = 1;
break;
}
break; /* while loop */
}
}
break;
}
}
return 1;
}
if (symp->range) {
if (symp->range == SYM_CONTROL) {
switch (symp->idx) {
case 0:
/* start of symset */
if (!strcmpi(bufp, symset[which_set].name)) {
/* matches desired one */
chosen_symset_start = TRUE;
/* these init_*() functions clear symset fields too */
if (which_set == ROGUESET)
init_rogue_symbols();
else if (which_set == PRIMARY)
init_primary_symbols();
}
break;
case 1:
/* finish symset */
if (chosen_symset_start)
chosen_symset_end = TRUE;
chosen_symset_start = FALSE;
break;
case 2:
/* handler type identified */
if (chosen_symset_start)
set_symhandling(bufp, which_set);
break;
/* case 3: (description) is ignored here */
case 4: /* color:off */
if (chosen_symset_start) {
if (bufp) {
if (!strcmpi(bufp, "true") || !strcmpi(bufp, "yes")
|| !strcmpi(bufp, "on"))
symset[which_set].nocolor = 0;
else if (!strcmpi(bufp, "false")
|| !strcmpi(bufp, "no")
|| !strcmpi(bufp, "off"))
symset[which_set].nocolor = 1;
}
}
break;
case 5: /* restrictions: xxxx*/
if (chosen_symset_start) {
int n = 0;
while (known_restrictions[n]) {
if (!strcmpi(known_restrictions[n], bufp)) {
switch (n) {
case 0:
symset[which_set].primary = 1;
break;
case 1:
symset[which_set].rogue = 1;
break;
}
break; /* while loop */
}
n++;
}
}
break;
}
} else { /* !SYM_CONTROL */
val = sym_val(bufp);
if (chosen_symset_start) {
if (which_set == PRIMARY) {
update_primary_symset(symp, val);
} else if (which_set == ROGUESET) {
update_rogue_symset(symp, val);
}
}
}
}
return 1;
}
STATIC_OVL void
set_symhandling(handling, which_set)
char *handling;
int which_set;
{
int i = 0;
symset[which_set].handling = H_UNK;
while (known_handling[i]) {
if (!strcmpi(known_handling[i], handling)) {
symset[which_set].handling = i;
return;
}
i++;
}
}
/* ---------- END CONFIG FILE HANDLING ----------- */
/* ---------- BEGIN SCOREBOARD CREATION ----------- */
#ifdef OS2_CODEVIEW
#define UNUSED_if_not_OS2_CODEVIEW /*empty*/
#else
#define UNUSED_if_not_OS2_CODEVIEW UNUSED
#endif
/* verify that we can write to scoreboard file; if not, try to create one */
/*ARGUSED*/
void
check_recordfile(dir)
const char *dir UNUSED_if_not_OS2_CODEVIEW;
{
#if defined(PRAGMA_UNUSED) && !defined(OS2_CODEVIEW)
#pragma unused(dir)
#endif
const char *fq_record;
int fd;
#if defined(UNIX) || defined(VMS)
fq_record = fqname(RECORD, SCOREPREFIX, 0);
fd = open(fq_record, O_RDWR, 0);
if (fd >= 0) {
#ifdef VMS /* must be stream-lf to use UPDATE_RECORD_IN_PLACE */
if (!file_is_stmlf(fd)) {
raw_printf(
"Warning: scoreboard file '%s' is not in stream_lf format",
fq_record);
wait_synch();
}
#endif
(void) nhclose(fd); /* RECORD is accessible */
} else if ((fd = open(fq_record, O_CREAT | O_RDWR, FCMASK)) >= 0) {
(void) nhclose(fd); /* RECORD newly created */
#if defined(VMS) && !defined(SECURE)
/* Re-protect RECORD with world:read+write+execute+delete access. */
(void) chmod(fq_record, FCMASK | 007);
#endif /* VMS && !SECURE */
} else {
raw_printf("Warning: cannot write scoreboard file '%s'", fq_record);
wait_synch();
}
#endif /* !UNIX && !VMS */
#if defined(MICRO) || defined(WIN32)
char tmp[PATHLEN];
#ifdef OS2_CODEVIEW /* explicit path on opening for OS/2 */
/* how does this work when there isn't an explicit path or fopenp
* for later access to the file via fopen_datafile? ? */
(void) strncpy(tmp, dir, PATHLEN - 1);
tmp[PATHLEN - 1] = '\0';
if ((strlen(tmp) + 1 + strlen(RECORD)) < (PATHLEN - 1)) {
append_slash(tmp);
Strcat(tmp, RECORD);
}
fq_record = tmp;
#else
Strcpy(tmp, RECORD);
fq_record = fqname(RECORD, SCOREPREFIX, 0);
#endif
#ifdef WIN32
/* If dir is NULL it indicates create but
only if it doesn't already exist */
if (!dir) {
char buf[BUFSZ];
buf[0] = '\0';
fd = open(fq_record, O_RDWR);
if (!(fd == -1 && errno == ENOENT)) {
if (fd >= 0) {
(void) nhclose(fd);
} else {
/* explanation for failure other than missing file */
Sprintf(buf, "error \"%s\", (errno %d).",
fq_record, errno);
paniclog("scorefile", buf);
}
return;
}
Sprintf(buf, "missing \"%s\", creating new scorefile.",
fq_record);
paniclog("scorefile", buf);
}
#endif
if ((fd = open(fq_record, O_RDWR)) < 0) {
/* try to create empty 'record' */
#if defined(AZTEC_C) || defined(_DCC) \
|| (defined(__GNUC__) && defined(__AMIGA__))
/* Aztec doesn't use the third argument */
/* DICE doesn't like it */
fd = open(fq_record, O_CREAT | O_RDWR);
#else
fd = open(fq_record, O_CREAT | O_RDWR, S_IREAD | S_IWRITE);
#endif
if (fd <= 0) {
raw_printf("Warning: cannot write record '%s'", tmp);
wait_synch();
} else {
(void) nhclose(fd);
}
} else {
/* open succeeded => 'record' exists */
(void) nhclose(fd);
}
#else /* MICRO || WIN32*/
#ifdef MAC
/* Create the "record" file, if necessary */
fq_record = fqname(RECORD, SCOREPREFIX, 0);
fd = macopen(fq_record, O_RDWR | O_CREAT, TEXT_TYPE);
if (fd != -1)
macclose(fd);
#endif /* MAC */
#endif /* MICRO || WIN32*/
}
/* ---------- END SCOREBOARD CREATION ----------- */
/* ---------- BEGIN PANIC/IMPOSSIBLE/TESTING LOG ----------- */
/*ARGSUSED*/
void
paniclog(type, reason)
const char *type; /* panic, impossible, trickery */
const char *reason; /* explanation */
{
#ifdef PANICLOG
FILE *lfile;
char buf[BUFSZ];
if (!program_state.in_paniclog) {
program_state.in_paniclog = 1;
lfile = fopen_datafile(PANICLOG, "a", TROUBLEPREFIX);
if (lfile) {
#ifdef PANICLOG_FMT2
(void) fprintf(lfile, "%ld %s: %s %s\n",
ubirthday, (plname ? plname : "(none)"),
type, reason);
#else
time_t now = getnow();
int uid = getuid();
char playmode = wizard ? 'D' : discover ? 'X' : '-';
(void) fprintf(lfile, "%s %08ld %06ld %d %c: %s %s\n",
version_string(buf), yyyymmdd(now), hhmmss(now),
uid, playmode, type, reason);
#endif /* !PANICLOG_FMT2 */
(void) fclose(lfile);
}
program_state.in_paniclog = 0;
}
#endif /* PANICLOG */
return;
}
void
testinglog(filenm, type, reason)
const char *filenm; /* ad hoc file name */
const char *type;
const char *reason; /* explanation */
{
FILE *lfile;
char fnbuf[BUFSZ];
if (!filenm)
return;
Strcpy(fnbuf, filenm);
if (index(fnbuf, '.') == 0)
Strcat(fnbuf, ".log");
lfile = fopen_datafile(fnbuf, "a", TROUBLEPREFIX);
if (lfile) {
(void) fprintf(lfile, "%s\n%s\n", type, reason);
(void) fclose(lfile);
}
return;
}
/* ---------- END PANIC/IMPOSSIBLE/TESTING LOG ----------- */
#ifdef SELF_RECOVER
/* ---------- BEGIN INTERNAL RECOVER ----------- */
boolean
recover_savefile()
{
int gfd, lfd, sfd;
int lev, savelev, hpid, pltmpsiz;
xchar levc;
struct version_info version_data;
int processed[256];
char savename[SAVESIZE], errbuf[BUFSZ];
struct savefile_info sfi;
char tmpplbuf[PL_NSIZ];
for (lev = 0; lev < 256; lev++)
processed[lev] = 0;
/* level 0 file contains:
* pid of creating process (ignored here)
* level number for current level of save file
* name of save file nethack would have created
* savefile info
* player name
* and game state
*/
gfd = open_levelfile(0, errbuf);
if (gfd < 0) {
raw_printf("%s\n", errbuf);
return FALSE;
}
if (read(gfd, (genericptr_t) &hpid, sizeof hpid) != sizeof hpid) {
raw_printf("\n%s\n%s\n",
"Checkpoint data incompletely written or subsequently clobbered.",
"Recovery impossible.");
(void) nhclose(gfd);
return FALSE;
}
if (read(gfd, (genericptr_t) &savelev, sizeof(savelev))
!= sizeof(savelev)) {
raw_printf(
"\nCheckpointing was not in effect for %s -- recovery impossible.\n",
lock);
(void) nhclose(gfd);
return FALSE;
}
if ((read(gfd, (genericptr_t) savename, sizeof savename)
!= sizeof savename)
|| (read(gfd, (genericptr_t) &version_data, sizeof version_data)
!= sizeof version_data)
|| (read(gfd, (genericptr_t) &sfi, sizeof sfi) != sizeof sfi)
|| (read(gfd, (genericptr_t) &pltmpsiz, sizeof pltmpsiz)
!= sizeof pltmpsiz) || (pltmpsiz > PL_NSIZ)
|| (read(gfd, (genericptr_t) &tmpplbuf, pltmpsiz) != pltmpsiz)) {
raw_printf("\nError reading %s -- can't recover.\n", lock);
(void) nhclose(gfd);
return FALSE;
}
/* save file should contain:
* version info
* savefile info
* player name
* current level (including pets)
* (non-level-based) game state
* other levels
*/
set_savefile_name(TRUE);
sfd = create_savefile();
if (sfd < 0) {
raw_printf("\nCannot recover savefile %s.\n", SAVEF);
(void) nhclose(gfd);
return FALSE;
}
lfd = open_levelfile(savelev, errbuf);
if (lfd < 0) {
raw_printf("\n%s\n", errbuf);
(void) nhclose(gfd);
(void) nhclose(sfd);
delete_savefile();
return FALSE;
}
if (write(sfd, (genericptr_t) &version_data, sizeof version_data)
!= sizeof version_data) {
raw_printf("\nError writing %s; recovery failed.", SAVEF);
(void) nhclose(gfd);
(void) nhclose(sfd);
(void) nhclose(lfd);
delete_savefile();
return FALSE;
}
if (write(sfd, (genericptr_t) &sfi, sizeof sfi) != sizeof sfi) {
raw_printf("\nError writing %s; recovery failed (savefile_info).\n",
SAVEF);
(void) nhclose(gfd);
(void) nhclose(sfd);
(void) nhclose(lfd);
delete_savefile();
return FALSE;
}
if (write(sfd, (genericptr_t) &pltmpsiz, sizeof pltmpsiz)
!= sizeof pltmpsiz) {
raw_printf("Error writing %s; recovery failed (player name size).\n",
SAVEF);
(void) nhclose(gfd);
(void) nhclose(sfd);
(void) nhclose(lfd);
delete_savefile();
return FALSE;
}
if (write(sfd, (genericptr_t) &tmpplbuf, pltmpsiz) != pltmpsiz) {
raw_printf("Error writing %s; recovery failed (player name).\n",
SAVEF);
(void) nhclose(gfd);
(void) nhclose(sfd);
(void) nhclose(lfd);
delete_savefile();
return FALSE;
}
if (!copy_bytes(lfd, sfd)) {
(void) nhclose(gfd);
(void) nhclose(sfd);
(void) nhclose(lfd);
delete_savefile();
return FALSE;
}
(void) nhclose(lfd);
processed[savelev] = 1;
if (!copy_bytes(gfd, sfd)) {
(void) nhclose(gfd);
(void) nhclose(sfd);
delete_savefile();
return FALSE;
}
(void) nhclose(gfd);
processed[0] = 1;
for (lev = 1; lev < 256; lev++) {
/* level numbers are kept in xchars in save.c, so the
* maximum level number (for the endlevel) must be < 256
*/
if (lev != savelev) {
lfd = open_levelfile(lev, (char *) 0);
if (lfd >= 0) {
/* any or all of these may not exist */
levc = (xchar) lev;
write(sfd, (genericptr_t) &levc, sizeof(levc));
if (!copy_bytes(lfd, sfd)) {
(void) nhclose(lfd);
(void) nhclose(sfd);
delete_savefile();
return FALSE;
}
(void) nhclose(lfd);
processed[lev] = 1;
}
}
}
(void) nhclose(sfd);
#ifdef HOLD_LOCKFILE_OPEN
really_close();
#endif
/*
* We have a successful savefile!
* Only now do we erase the level files.
*/
for (lev = 0; lev < 256; lev++) {
if (processed[lev]) {
const char *fq_lock;
set_levelfile_name(lock, lev);
fq_lock = fqname(lock, LEVELPREFIX, 3);
(void) unlink(fq_lock);
}
}
return TRUE;
}
boolean
copy_bytes(ifd, ofd)
int ifd, ofd;
{
char buf[BUFSIZ];
int nfrom, nto;
do {
nfrom = read(ifd, buf, BUFSIZ);
nto = write(ofd, buf, nfrom);
if (nto != nfrom)
return FALSE;
} while (nfrom == BUFSIZ);
return TRUE;
}
/* ---------- END INTERNAL RECOVER ----------- */
#endif /*SELF_RECOVER*/
/* ---------- OTHER ----------- */
#ifdef SYSCF
#ifdef SYSCF_FILE
void
assure_syscf_file()
{
int fd;
#ifdef WIN32
/* We are checking that the sysconf exists ... lock the path */
fqn_prefix_locked[SYSCONFPREFIX] = TRUE;
#endif
/*
* All we really care about is the end result - can we read the file?
* So just check that directly.
*
* Not tested on most of the old platforms (which don't attempt
* to implement SYSCF).
* Some ports don't like open()'s optional third argument;
* VMS overrides open() usage with a macro which requires it.
*/
#ifndef VMS
# if defined(NOCWD_ASSUMPTIONS) && defined(WIN32)
fd = open(fqname(SYSCF_FILE, SYSCONFPREFIX, 0), O_RDONLY);
# else
fd = open(SYSCF_FILE, O_RDONLY);
# endif
#else
fd = open(SYSCF_FILE, O_RDONLY, 0);
#endif
if (fd >= 0) {
/* readable */
close(fd);
return;
}
raw_printf("Unable to open SYSCF_FILE.\n");
exit(EXIT_FAILURE);
}
#endif /* SYSCF_FILE */
#endif /* SYSCF */
#ifdef DEBUG
/* used by debugpline() to decide whether to issue a message
* from a particular source file; caller passes __FILE__ and we check
* whether it is in the source file list supplied by SYSCF's DEBUGFILES
*
* pass FALSE to override wildcard matching; useful for files
* like dungeon.c and questpgr.c, which generate a ridiculous amount of
* output if DEBUG is defined and effectively block the use of a wildcard */
boolean
debugcore(filename, wildcards)
const char *filename;
boolean wildcards;
{
const char *debugfiles, *p;
if (!filename || !*filename)
return FALSE; /* sanity precaution */
if (sysopt.env_dbgfl == 0) {
/* check once for DEBUGFILES in the environment;
if found, it supersedes the sysconf value
[note: getenv() rather than nh_getenv() since a long value
is valid and doesn't pose any sort of overflow risk here] */
if ((p = getenv("DEBUGFILES")) != 0) {
if (sysopt.debugfiles)
free((genericptr_t) sysopt.debugfiles);
sysopt.debugfiles = dupstr(p);
sysopt.env_dbgfl = 1;
} else
sysopt.env_dbgfl = -1;
}
debugfiles = sysopt.debugfiles;
/* usual case: sysopt.debugfiles will be empty */
if (!debugfiles || !*debugfiles)
return FALSE;
/* strip filename's path if present */
#ifdef UNIX
if ((p = rindex(filename, '/')) != 0)
filename = p + 1;
#endif
#ifdef VMS
filename = vms_basename(filename);
/* vms_basename strips off 'type' suffix as well as path and version;
we want to put suffix back (".c" assumed); since it always returns
a pointer to a static buffer, we can safely modify its result */
Strcat((char *) filename, ".c");
#endif
/*
* Wildcard match will only work if there's a single pattern (which
* might be a single file name without any wildcarding) rather than
* a space-separated list.
* [to NOT do: We could step through the space-separated list and
* attempt a wildcard match against each element, but that would be
* overkill for the intended usage.]
*/
if (wildcards && pmatch(debugfiles, filename))
return TRUE;
/* check whether filename is an element of the list */
if ((p = strstr(debugfiles, filename)) != 0) {
int l = (int) strlen(filename);
if ((p == debugfiles || p[-1] == ' ' || p[-1] == '/')
&& (p[l] == ' ' || p[l] == '\0'))
return TRUE;
}
return FALSE;
}
#endif /*DEBUG*/
#ifdef UNIX
#ifndef PATH_MAX
#include <limits.h>
#endif
#endif
void
reveal_paths(VOID_ARGS)
{
const char *fqn, *nodumpreason;
char buf[BUFSZ];
#if defined(SYSCF) || !defined(UNIX) || defined(DLB)
const char *filep;
#ifdef SYSCF
const char *gamename = (hname && *hname) ? hname : "NetHack";
#endif
#endif
#ifdef UNIX
char *endp, *envp, cwdbuf[PATH_MAX];
#endif
#ifdef PREFIXES_IN_USE
const char *strp;
int i, maxlen = 0;
raw_print("Variable playground locations:");
for (i = 0; i < PREFIX_COUNT; i++)
raw_printf(" [%-10s]=\"%s\"", fqn_prefix_names[i],
fqn_prefix[i] ? fqn_prefix[i] : "not set");
#endif
/* sysconf file */
#ifdef SYSCF
#ifdef PREFIXES_IN_USE
strp = fqn_prefix_names[SYSCONFPREFIX];
maxlen = BUFSZ - sizeof " (in )";
if (strp && (int) strlen(strp) < maxlen)
Sprintf(buf, " (in %s)", strp);
#else
buf[0] = '\0';
#endif
raw_printf("%s system configuration file%s:", s_suffix(gamename), buf);
#ifdef SYSCF_FILE
filep = SYSCF_FILE;
#else
filep = "sysconf";
#endif
fqn = fqname(filep, SYSCONFPREFIX, 0);
if (fqn) {
set_configfile_name(fqn);
filep = configfile;
}
raw_printf(" \"%s\"", filep);
#else /* !SYSCF */
raw_printf("No system configuration file.");
#endif /* ?SYSCF */
/* symbols file */
buf[0] = '\0';
#ifndef UNIX
#ifdef PREFIXES_IN_USE
#ifdef WIN32
strp = fqn_prefix_names[SYSCONFPREFIX];
#else
strp = fqn_prefix_names[HACKPREFIX];
#endif /* WIN32 */
maxlen = BUFSZ - sizeof " (in )";
if (strp && (int) strlen(strp) < maxlen)
Sprintf(buf, " (in %s)", strp);
#endif /* PREFIXES_IN_USE */
raw_printf("The loadable symbols file%s:", buf);
#endif /* UNIX */
#ifdef UNIX
envp = getcwd(cwdbuf, PATH_MAX);
if (envp) {
raw_print("The loadable symbols file:");
raw_printf(" \"%s/%s\"", envp, SYMBOLS);
}
#else /* UNIX */
filep = SYMBOLS;
#ifdef PREFIXES_IN_USE
#ifdef WIN32
fqn = fqname(filep, SYSCONFPREFIX, 1);
#else
fqn = fqname(filep, HACKPREFIX, 1);
#endif /* WIN32 */
if (fqn)
filep = fqn;
#endif /* PREFIXES_IN_USE */
raw_printf(" \"%s\"", filep);
#endif /* UNIX */
/* dlb vs non-dlb */
buf[0] = '\0';
#ifdef PREFIXES_IN_USE
strp = fqn_prefix_names[DATAPREFIX];
maxlen = BUFSZ - sizeof " (in )";
if (strp && (int) strlen(strp) < maxlen)
Sprintf(buf, " (in %s)", strp);
#endif
#ifdef DLB
raw_printf("Basic data files%s are collected inside:", buf);
filep = DLBFILE;
#ifdef VERSION_IN_DLB_FILENAME
Strcpy(buf, build_dlb_filename((const char *) 0));
#ifdef PREFIXES_IN_USE
fqn = fqname(buf, DATAPREFIX, 1);
if (fqn)
filep = fqn;
#endif /* PREFIXES_IN_USE */
#endif
raw_printf(" \"%s\"", filep);
#ifdef DLBFILE2
filep = DLBFILE2;
raw_printf(" \"%s\"", filep);
#endif
#else /* !DLB */
raw_printf("Basic data files%s are in many separate files.", buf);
#endif /* ?DLB */
/* dumplog */
#ifndef DUMPLOG
nodumpreason = "not supported";
#else
nodumpreason = "disabled";
#ifdef SYSCF
fqn = sysopt.dumplogfile;
#else /* !SYSCF */
#ifdef DUMPLOG_FILE
fqn = DUMPLOG_FILE;
#else
fqn = (char *) 0;
#endif
#endif /* ?SYSCF */
if (fqn && *fqn) {
raw_print("Your end-of-game disclosure file:");
(void) dump_fmtstr(fqn, buf, FALSE);
buf[sizeof buf - sizeof " \"\""] = '\0';
raw_printf(" \"%s\"", buf);
} else
#endif /* ?DUMPLOG */
raw_printf("No end-of-game disclosure file (%s).", nodumpreason);
#ifdef WIN32
if (sysopt.portable_device_top) {
const char *pd = get_portable_device();
raw_printf("Writable folder for portable device config (sysconf %s):",
"portable_device_top");
raw_printf(" \"%s\"", pd);
}
#endif
/* personal configuration file */
buf[0] = '\0';
#ifdef PREFIXES_IN_USE
strp = fqn_prefix_names[CONFIGPREFIX];
maxlen = BUFSZ - sizeof " (in )";
if (strp && (int) strlen(strp) < maxlen)
Sprintf(buf, " (in %s)", strp);
#endif /* PREFIXES_IN_USE */
raw_printf("Your personal configuration file%s:", buf);
#ifdef UNIX
buf[0] = '\0';
if ((envp = nh_getenv("HOME")) != 0) {
copynchars(buf, envp, (int) sizeof buf - 1 - 1);
Strcat(buf, "/");
}
endp = eos(buf);
copynchars(endp, default_configfile,
(int) (sizeof buf - 1 - strlen(buf)));
#if defined(__APPLE__) /* UNIX+__APPLE__ => MacOSX aka OSX aka macOS */
if (envp) {
if (access(buf, 4) == -1) { /* 4: R_OK, -1: failure */
/* read access to default failed; might be protected excessively
but more likely it doesn't exist; try first alternate:
"$HOME/Library/Pref..."; 'endp' points past '/' */
copynchars(endp, "Library/Preferences/NetHack Defaults",
(int) (sizeof buf - 1 - strlen(buf)));
if (access(buf, 4) == -1) {
/* first alternate failed, try second:
".../NetHack Defaults.txt"; no 'endp', just append */
copynchars(eos(buf), ".txt",
(int) (sizeof buf - 1 - strlen(buf)));
if (access(buf, 4) == -1) {
/* second alternate failed too, so revert to the
original default ("$HOME/.nethackrc") for message */
copynchars(endp, default_configfile,
(int) (sizeof buf - 1 - strlen(buf)));
}
}
}
}
#endif /* __APPLE__ */
raw_printf(" \"%s\"", buf);
#else /* !UNIX */
fqn = (const char *) 0;
#ifdef PREFIXES_IN_USE
fqn = fqname(default_configfile, CONFIGPREFIX, 2);
#endif
raw_printf(" \"%s\"", fqn ? fqn : default_configfile);
#endif /* ?UNIX */
raw_print("");
}
/* ---------- BEGIN TRIBUTE ----------- */
/* 3.6 tribute code
*/
#define SECTIONSCOPE 1
#define TITLESCOPE 2
#define PASSAGESCOPE 3
#define MAXPASSAGES SIZE(context.novel.pasg) /* 20 */
static int FDECL(choose_passage, (int, unsigned));
/* choose a random passage that hasn't been chosen yet; once all have
been chosen, reset the tracking to make all passages available again */
static int
choose_passage(passagecnt, oid)
int passagecnt; /* total of available passages */
unsigned oid; /* book.o_id, used to determine whether re-reading same book */
{
int idx, res;
if (passagecnt < 1)
return 0;
/* if a different book or we've used up all the passages already,
reset in order to have all 'passagecnt' passages available */
if (oid != context.novel.id || context.novel.count == 0) {
int i, range = passagecnt, limit = MAXPASSAGES;
context.novel.id = oid;
if (range <= limit) {
/* collect all of the N indices */
context.novel.count = passagecnt;
for (idx = 0; idx < MAXPASSAGES; idx++)
context.novel.pasg[idx] = (xchar) ((idx < passagecnt)
? idx + 1 : 0);
} else {
/* collect MAXPASSAGES of the N indices */
context.novel.count = MAXPASSAGES;
for (idx = i = 0; i < passagecnt; ++i, --range)
if (range > 0 && rn2(range) < limit) {
context.novel.pasg[idx++] = (xchar) (i + 1);
--limit;
}
}
}
idx = rn2(context.novel.count);
res = (int) context.novel.pasg[idx];
/* move the last slot's passage index into the slot just used
and reduce the number of passages available */
context.novel.pasg[idx] = context.novel.pasg[--context.novel.count];
return res;
}
/* Returns True if you were able to read something. */
boolean
read_tribute(tribsection, tribtitle, tribpassage, nowin_buf, bufsz, oid)
const char *tribsection, *tribtitle;
int tribpassage, bufsz;
char *nowin_buf;
unsigned oid; /* book identifier */
{
dlb *fp;
char line[BUFSZ], lastline[BUFSZ];
int scope = 0;
int linect = 0, passagecnt = 0, targetpassage = 0;
const char *badtranslation = "an incomprehensible foreign translation";
boolean matchedsection = FALSE, matchedtitle = FALSE;
winid tribwin = WIN_ERR;
boolean grasped = FALSE;
boolean foundpassage = FALSE;
if (nowin_buf)
*nowin_buf = '\0';
/* check for mandatories */
if (!tribsection || !tribtitle) {
if (!nowin_buf)
pline("It's %s of \"%s\"!", badtranslation, tribtitle);
return grasped;
}
debugpline3("read_tribute %s, %s, %d.", tribsection, tribtitle,
tribpassage);
fp = dlb_fopen(TRIBUTEFILE, "r");
if (!fp) {
/* this is actually an error - cannot open tribute file! */
if (!nowin_buf)
pline("You feel too overwhelmed to continue!");
return grasped;
}
/*
* Syntax (not case-sensitive):
* %section books
*
* In the books section:
* %title booktitle (n)
* where booktitle=book title without quotes
* (n)= total number of passages present for this title
* %passage k
* where k=sequential passage number
*
* %e ends the passage/book/section
* If in a passage, it marks the end of that passage.
* If in a book, it marks the end of that book.
* If in a section, it marks the end of that section.
*
* %section death
*/
*line = *lastline = '\0';
while (dlb_fgets(line, sizeof line, fp) != 0) {
linect++;
(void) strip_newline(line);
switch (line[0]) {
case '%':
if (!strncmpi(&line[1], "section ", sizeof "section " - 1)) {
char *st = &line[9]; /* 9 from "%section " */
scope = SECTIONSCOPE;
matchedsection = !strcmpi(st, tribsection) ? TRUE : FALSE;
} else if (!strncmpi(&line[1], "title ", sizeof "title " - 1)) {
char *st = &line[7]; /* 7 from "%title " */
char *p1, *p2;
if ((p1 = index(st, '(')) != 0) {
*p1++ = '\0';
(void) mungspaces(st);
if ((p2 = index(p1, ')')) != 0) {
*p2 = '\0';
passagecnt = atoi(p1);
scope = TITLESCOPE;
if (matchedsection && !strcmpi(st, tribtitle)) {
matchedtitle = TRUE;
targetpassage = !tribpassage
? choose_passage(passagecnt, oid)
: (tribpassage <= passagecnt)
? tribpassage : 0;
} else {
matchedtitle = FALSE;
}
}
}
} else if (!strncmpi(&line[1], "passage ",
sizeof "passage " - 1)) {
int passagenum = 0;
char *st = &line[9]; /* 9 from "%passage " */
mungspaces(st);
passagenum = atoi(st);
if (passagenum > 0 && passagenum <= passagecnt) {
scope = PASSAGESCOPE;
if (matchedtitle && passagenum == targetpassage) {
foundpassage = TRUE;
if (!nowin_buf) {
tribwin = create_nhwindow(NHW_MENU);
if (tribwin == WIN_ERR)
goto cleanup;
}
}
}
} else if (!strncmpi(&line[1], "e ", sizeof "e " - 1)) {
if (foundpassage)
goto cleanup;
if (scope == TITLESCOPE)
matchedtitle = FALSE;
if (scope == SECTIONSCOPE)
matchedsection = FALSE;
if (scope)
--scope;
} else {
debugpline1("tribute file error: bad %% command, line %d.",
linect);
}
break;
case '#':
/* comment only, next! */
break;
default:
if (foundpassage) {
if (!nowin_buf) {
/* outputting multi-line passage to text window */
putstr(tribwin, 0, line);
if (*line)
Strcpy(lastline, line);
} else {
/* fetching one-line passage into buffer */
copynchars(nowin_buf, line, bufsz - 1);
goto cleanup; /* don't wait for "%e passage" */
}
}
}
}
cleanup:
(void) dlb_fclose(fp);
if (nowin_buf) {
/* one-line buffer */
grasped = *nowin_buf ? TRUE : FALSE;
} else {
if (tribwin != WIN_ERR) { /* implies 'foundpassage' */
/* multi-line window, normal case;
if lastline is empty, there were no non-empty lines between
"%passage n" and "%e passage" so we leave 'grasped' False */
if (*lastline) {
display_nhwindow(tribwin, FALSE);
/* put the final attribution line into message history,
analogous to the summary line from long quest messages */
if (index(lastline, '['))
mungspaces(lastline); /* to remove leading spaces */
else /* construct one if necessary */
Sprintf(lastline, "[%s, by Terry Pratchett]", tribtitle);
putmsghistory(lastline, FALSE);
grasped = TRUE;
}
destroy_nhwindow(tribwin);
}
if (!grasped)
/* multi-line window, problem */
pline("It seems to be %s of \"%s\"!", badtranslation, tribtitle);
}
return grasped;
}
boolean
Death_quote(buf, bufsz)
char *buf;
int bufsz;
{
unsigned death_oid = 1; /* chance of oid #1 being a novel is negligible */
return read_tribute("Death", "Death Quotes", 0, buf, bufsz, death_oid);
}
/* ---------- END TRIBUTE ----------- */
/*files.c*/
| ./CrossVul/dataset_final_sorted/CWE-120/c/good_1321_0 |
crossvul-cpp_data_bad_4847_0 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The User Datagram Protocol (UDP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Hirokazu Takahashi, <taka@valinux.co.jp>
*
* Fixes:
* Alan Cox : verify_area() calls
* Alan Cox : stopped close while in use off icmp
* messages. Not a fix but a botch that
* for udp at least is 'valid'.
* Alan Cox : Fixed icmp handling properly
* Alan Cox : Correct error for oversized datagrams
* Alan Cox : Tidied select() semantics.
* Alan Cox : udp_err() fixed properly, also now
* select and read wake correctly on errors
* Alan Cox : udp_send verify_area moved to avoid mem leak
* Alan Cox : UDP can count its memory
* Alan Cox : send to an unknown connection causes
* an ECONNREFUSED off the icmp, but
* does NOT close.
* Alan Cox : Switched to new sk_buff handlers. No more backlog!
* Alan Cox : Using generic datagram code. Even smaller and the PEEK
* bug no longer crashes it.
* Fred Van Kempen : Net2e support for sk->broadcast.
* Alan Cox : Uses skb_free_datagram
* Alan Cox : Added get/set sockopt support.
* Alan Cox : Broadcasting without option set returns EACCES.
* Alan Cox : No wakeup calls. Instead we now use the callbacks.
* Alan Cox : Use ip_tos and ip_ttl
* Alan Cox : SNMP Mibs
* Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
* Matt Dillon : UDP length checks.
* Alan Cox : Smarter af_inet used properly.
* Alan Cox : Use new kernel side addressing.
* Alan Cox : Incorrect return on truncated datagram receive.
* Arnt Gulbrandsen : New udp_send and stuff
* Alan Cox : Cache last socket
* Alan Cox : Route cache
* Jon Peatfield : Minor efficiency fix to sendto().
* Mike Shaver : RFC1122 checks.
* Alan Cox : Nonblocking error fix.
* Willy Konynenberg : Transparent proxying support.
* Mike McLagan : Routing by source
* David S. Miller : New socket lookup architecture.
* Last socket cache retained as it
* does have a high hit rate.
* Olaf Kirch : Don't linearise iovec on sendmsg.
* Andi Kleen : Some cleanups, cache destination entry
* for connect.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Melvin Smith : Check msg_name not msg_namelen in sendto(),
* return ENOTCONN for unconnected sockets (POSIX)
* Janos Farkas : don't deliver multi/broadcasts to a different
* bound-to-device socket
* Hirokazu Takahashi : HW checksumming for outgoing UDP
* datagrams.
* Hirokazu Takahashi : sendfile() on UDP works now.
* Arnaldo C. Melo : convert /proc/net/udp to seq_file
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
* James Chapman : Add L2TP encapsulation type.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "UDP: " fmt
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);
int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);
int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2),
unsigned int log)
{
struct sock *sk2;
struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each(sk2, node, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(bitmap || udp_sk(sk2)->udp_port_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(!sk2->sk_reuseport || !sk->sk_reuseport ||
!uid_eq(uid, sock_i_uid(sk2))) &&
saddr_comp(sk, sk2)) {
if (!bitmap)
return 1;
__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
}
}
return 0;
}
/*
* Note: we still hold spinlock of primary hash chain, so no other writer
* can insert/delete a socket with local_port == num
*/
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
{
struct sock *sk2;
struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(udp_sk(sk2)->udp_port_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(!sk2->sk_reuseport || !sk->sk_reuseport ||
!uid_eq(uid, sock_i_uid(sk2))) &&
saddr_comp(sk, sk2)) {
res = 1;
break;
}
}
spin_unlock(&hslot2->lock);
return res;
}
/**
* udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
*
* @sk: socket struct in question
* @snum: port number to look up
* @saddr_comp: AF-dependent comparison of bound local IP addresses
* @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
* with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2),
unsigned int hash2_nulladdr)
{
struct udp_hslot *hslot, *hslot2;
struct udp_table *udptable = sk->sk_prot->h.udp_table;
int error = 1;
struct net *net = sock_net(sk);
if (!snum) {
int low, high, remaining;
unsigned int rand;
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rand = prandom_u32();
first = reciprocal_scale(rand, remaining) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * (udptable->mask + 1);
last = first + udptable->mask + 1;
do {
hslot = udp_hashslot(udptable, net, first);
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
saddr_comp, udptable->log);
snum = first;
/*
* Iterate on all possible values of snum for this hash.
* Using steps of an odd multiple of UDP_HTABLE_SIZE
* give us randomization and full range coverage.
*/
do {
if (low <= snum && snum <= high &&
!test_bit(snum >> udptable->log, bitmap) &&
!inet_is_local_reserved_port(net, snum))
goto found;
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
} while (++first != last);
goto fail;
} else {
hslot = udp_hashslot(udptable, net, snum);
spin_lock_bh(&hslot->lock);
if (hslot->count > 10) {
int exist;
unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
slot2 &= udptable->mask;
hash2_nulladdr &= udptable->mask;
hslot2 = udp_hashslot2(udptable, slot2);
if (hslot->count < hslot2->count)
goto scan_primary_hash;
exist = udp_lib_lport_inuse2(net, snum, hslot2,
sk, saddr_comp);
if (!exist && (hash2_nulladdr != slot2)) {
hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
exist = udp_lib_lport_inuse2(net, snum, hslot2,
sk, saddr_comp);
}
if (exist)
goto fail_unlock;
else
goto found;
}
scan_primary_hash:
if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
saddr_comp, 0))
goto fail_unlock;
}
found:
inet_sk(sk)->inet_num = snum;
udp_sk(sk)->udp_port_hash = snum;
udp_sk(sk)->udp_portaddr_hash ^= snum;
if (sk_unhashed(sk)) {
sk_nulls_add_node_rcu(sk, &hslot->head);
hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock(&hslot2->lock);
hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&hslot2->head);
hslot2->count++;
spin_unlock(&hslot2->lock);
}
error = 0;
fail_unlock:
spin_unlock_bh(&hslot->lock);
fail:
return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
return (!ipv6_only_sock(sk2) &&
(!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
unsigned int port)
{
return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
unsigned int hash2_partial =
udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
static inline int compute_score(struct sock *sk, struct net *net,
__be32 saddr, unsigned short hnum, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
ipv6_only_sock(sk))
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr) {
if (inet->inet_rcv_saddr != daddr)
return -1;
score += 4;
}
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
/*
* In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
*/
static inline int compute_score2(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
ipv6_only_sock(sk))
return -1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr != daddr ||
inet->inet_num != hnum)
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
static u32 udp_ehash_secret __read_mostly;
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
udp_ehash_secret + net_hash_mix(net));
}
/* called with read_rcu_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp4_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
htonl(INADDR_ANY), hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = 0;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, saddr, hnum, sport,
daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, saddr, hnum, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
const struct iphdr *iph = ip_hdr(skb);
return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
iph->daddr, dport, inet_iif(skb),
udptable);
}
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
(inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
(inet->inet_dport != rmt_port && inet->inet_dport) ||
(inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
ipv6_only_sock(sk) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
return false;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
return false;
return true;
}
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
* be closed and the error returned to the user. If err > 0
* it's just the icmp type << 8 | icmp code.
* Header points to the ip header of the error packet. We move
* on past this. Then (as it used to claim before adjustment)
* header points to the first 8 bytes of the udp header. We need
* to find the appropriate port.
*/
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
struct inet_sock *inet;
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
int harderr;
int err;
struct net *net = dev_net(skb->dev);
sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
iph->saddr, uh->source, skb->dev->ifindex, udptable);
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return; /* No socket for error */
}
err = 0;
harderr = 0;
inet = inet_sk(sk);
switch (type) {
default:
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
case ICMP_SOURCE_QUENCH:
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
harderr = 1;
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
ipv4_sk_update_pmtu(skb, sk, info);
if (inet->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
break;
}
goto out;
}
err = EHOSTUNREACH;
if (code <= NR_ICMP_UNREACH) {
harderr = icmp_err_convert[code].fatal;
err = icmp_err_convert[code].errno;
}
break;
case ICMP_REDIRECT:
ipv4_sk_redirect(skb, sk);
goto out;
}
/*
* RFC1122: OK. Passes ICMP errors back to application, as per
* 4.1.3.3.
*/
if (!inet->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else
ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
void udp_err(struct sk_buff *skb, u32 info)
{
__udp4_lib_err(skb, info, &udp_table);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
void udp_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending) {
up->len = 0;
up->pending = 0;
ip_flush_pending_frames(sk);
}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
* udp4_hwcsum - handle outgoing HW checksumming
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
* @src: source IP address
* @dst: destination IP address
*/
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
struct udphdr *uh = udp_hdr(skb);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
int hlen = len;
__wsum csum = 0;
if (!skb_has_frag_list(skb)) {
/*
* Only one fragment on the socket.
*/
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_tcpudp_magic(src, dst, len,
IPPROTO_UDP, 0);
} else {
struct sk_buff *frags;
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
skb_walk_frags(skb, frags) {
csum = csum_add(csum, frags->csum);
hlen -= frags->len;
}
csum = skb_checksum(skb, offset, hlen, csum);
skb->ip_summed = CHECKSUM_NONE;
uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
* for the simple case like when setting the checksum for a UDP tunnel.
*/
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len)
{
struct udphdr *uh = udp_hdr(skb);
if (nocheck)
uh->check = 0;
else if (skb_is_gso(skb))
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
else if (skb_dst(skb) && skb_dst(skb)->dev &&
(skb_dst(skb)->dev->features &
(NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) {
BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
} else {
__wsum csum;
BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
uh->check = 0;
csum = skb_checksum(skb, 0, len, 0);
uh->check = udp_v4_check(len, saddr, daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct udphdr *uh;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
__wsum csum = 0;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = inet->inet_sport;
uh->dest = fl4->fl4_dport;
uh->len = htons(len);
uh->check = 0;
if (is_udplite) /* UDP-Lite */
csum = udplite_csum(skb);
else if (sk->sk_no_check_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
goto send;
} else
csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
sk->sk_protocol, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
/*
* Push out all pending data as one UDP datagram. Socket is locked.
*/
int udp_push_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
struct sk_buff *skb;
int err = 0;
skb = ip_finish_skb(sk, fl4);
if (!skb)
goto out;
err = udp_send_skb(skb, fl4);
out:
up->len = 0;
up->pending = 0;
return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 fl4_stack;
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
fl4 = &inet->cork.fl.u.ip4;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc,
sk->sk_family == AF_INET6);
if (err)
return err;
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
saddr = ipc.addr;
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->opt.faddr;
connected = 0;
}
tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (!rt) {
struct net *net = sock_net(sk);
__u8 flow_flags = inet_sk_flowi_flags(sk);
fl4 = &fl4_stack;
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
flow_flags,
faddr, saddr, dport, inet->inet_sport);
if (!saddr && ipc.oif)
l3mdev_get_saddr(net, ipc.oif, fl4);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = fl4->saddr;
if (!ipc.addr)
daddr = ipc.addr = fl4->daddr;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_send_skb(skb, fl4);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
int udp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
int ret;
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
if (!up->pending) {
struct msghdr msg = { .msg_flags = flags|MSG_MORE };
/* Call udp_sendmsg to specify destination address which
* sendpage interface can't pass.
* This will succeed only when the socket is connected.
*/
ret = udp_sendmsg(sk, &msg, 0);
if (ret < 0)
return ret;
}
lock_sock(sk);
if (unlikely(!up->pending)) {
release_sock(sk);
net_dbg_ratelimited("udp cork app bug 3\n");
return -EINVAL;
}
ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
page, offset, size, flags);
if (ret == -EOPNOTSUPP) {
release_sock(sk);
return sock_no_sendpage(sk->sk_socket, page, offset,
size, flags);
}
if (ret < 0) {
udp_flush_pending_frames(sk);
goto out;
}
up->len += size;
if (!(up->corkflag || (flags&MSG_MORE)))
ret = udp_push_pending_frames(sk);
if (!ret)
ret = size;
out:
release_sock(sk);
return ret;
}
/**
* first_packet_length - return length of first packet in receive queue
* @sk: socket
*
* Drops all bad checksum frames, until a valid one is found.
* Returns the length of found skb, or 0 if none is found.
*/
static unsigned int first_packet_length(struct sock *sk)
{
struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
struct sk_buff *skb;
unsigned int res;
__skb_queue_head_init(&list_kill);
spin_lock_bh(&rcvq->lock);
while ((skb = skb_peek(rcvq)) != NULL &&
udp_lib_checksum_complete(skb)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
atomic_inc(&sk->sk_drops);
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
}
res = skb ? skb->len : 0;
spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) {
bool slow = lock_sock_fast(sk);
__skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk);
unlock_sock_fast(sk, slow);
}
return res;
}
/*
* IOCTL requests applicable to the UDP protocol
*/
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
{
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ:
{
unsigned int amount = first_packet_length(sk);
if (amount)
/*
* We will only return the amount
* of this packet since that is all
* that will be read.
*/
amount -= sizeof(struct udphdr);
return put_user(amount, (int __user *)arg);
}
default:
return -ENOIOCTLCMD;
}
return 0;
}
EXPORT_SYMBOL(udp_ioctl);
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
*/
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
bool slow;
if (flags & MSG_ERRQUEUE)
return ip_recv_error(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
goto out_free;
}
if (!peeked)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = udp_hdr(skb)->source;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
unlock_sock_fast(sk, slow);
/* starting over for a new packet, but check if we need to yield */
cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
int udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
sock_rps_reset_rxhash(sk);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
sk->sk_prot->unhash(sk);
inet->inet_sport = 0;
}
sk_dst_reset(sk);
return 0;
}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
struct udp_hslot *hslot, *hslot2;
hslot = udp_hashslot(udptable, sock_net(sk),
udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock_bh(&hslot->lock);
if (sk_nulls_del_node_init_rcu(sk)) {
hslot->count--;
inet_sk(sk)->inet_num = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_lock(&hslot2->lock);
hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
}
spin_unlock_bh(&hslot->lock);
}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
* inet_rcv_saddr was changed, we must rehash secondary hash
*/
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
struct udp_hslot *hslot, *hslot2, *nhslot2;
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
nhslot2 = udp_hashslot2(udptable, newhash);
udp_sk(sk)->udp_portaddr_hash = newhash;
if (hslot2 != nhslot2) {
hslot = udp_hashslot(udptable, sock_net(sk),
udp_sk(sk)->udp_port_hash);
/* we must lock primary chain too */
spin_lock_bh(&hslot->lock);
spin_lock(&hslot2->lock);
hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
spin_lock(&nhslot2->lock);
hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&nhslot2->head);
nhslot2->count++;
spin_unlock(&nhslot2->lock);
spin_unlock_bh(&hslot->lock);
}
}
}
EXPORT_SYMBOL(udp_lib_rehash);
static void udp_v4_rehash(struct sock *sk)
{
u16 new_hash = udp4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (inet_sk(sk)->inet_daddr) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
trace_udp_fail_queue_rcv_skb(rc, sk);
return -1;
}
return 0;
}
static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
if (!static_key_enabled(&udp_encap_needed))
static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);
/* returns:
* -1: error
* 0: success
* >0: "udp encap" protocol resubmission
*
* Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed.
*/
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
/*
* Charge it to the socket, dropping if the queue is full.
*/
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
if (udp_lib_checksum_complete(skb))
goto csum_error;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
* disabled for the following two types of errors: these depend
* on the application settings, not on the functioning of the
* protocol stack as such.
*
* RFC 3828 here recommends (sec 3.3): "There should also be a
* way ... to ... at least let the receiving application block
* delivery of packets with coverage values less than a value
* provided by the application."
*/
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
/* The next case involves violating the min. coverage requested
* by the receiver. This is subtle: if receiver wants x and x is
* greater than the buffersize/MTU then receiver will complain
* that it wants x while sender emits packets of smaller size y.
* Therefore the above ...()->partial_cov statement is essential.
*/
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter) &&
udp_lib_checksum_complete(skb))
goto csum_error;
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
goto drop;
}
rc = 0;
ipv4_pktinfo_prepare(sk, skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
unsigned int i;
struct sk_buff *skb1 = NULL;
struct sock *sk;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
sock_put(sk);
}
if (unlikely(skb1))
kfree_skb(skb1);
}
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old;
dst_hold(dst);
old = xchg(&sk->sk_rx_dst, dst);
dst_release(old);
}
/*
* Multicasts and broadcasts go to each listener.
*
* Note: called only from the BH handler context.
*/
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
struct udp_table *udptable,
int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = skb->dev->ifindex;
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
udp_table.mask;
hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
hslot = &udp_table.hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
spin_lock(&hslot->lock);
sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
if (__udp_is_mcast_sock(net, sk,
uh->dest, daddr,
uh->source, saddr,
dif, hnum)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
inner_flushed = true;
count = 0;
}
stack[count++] = sk;
sock_hold(sk);
}
}
spin_unlock(&hslot->lock);
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
/*
* do the slow work with no lock held
*/
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
* CHECKSUM_UNNECESSARY means, that no more checks are required.
* Otherwise, csum completion requires chacksumming packet body,
* including udp header and folding it to skb->csum.
*/
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
int proto)
{
int err;
UDP_SKB_CB(skb)->partial_cov = 0;
UDP_SKB_CB(skb)->cscov = skb->len;
if (proto == IPPROTO_UDPLITE) {
err = udplite_checksum_init(skb, uh);
if (err)
return err;
}
return skb_checksum_init_zero_check(skb, proto, uh->check,
inet_compute_pseudo);
}
/*
* All we need to do is get the socket, and then do a checksum.
*/
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct sock *sk;
struct udphdr *uh;
unsigned short ulen;
struct rtable *rt = skb_rtable(skb);
__be32 saddr, daddr;
struct net *net = dev_net(skb->dev);
/*
* Validate the packet.
*/
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto drop; /* No space for header. */
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
saddr = ip_hdr(skb)->saddr;
daddr = ip_hdr(skb)->daddr;
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
goto short_packet;
uh = udp_hdr(skb);
}
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
sk = skb_steal_sock(skb);
if (sk) {
struct dst_entry *dst = skb_dst(skb);
int ret;
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
return __udp4_lib_mcast_deliver(net, skb, uh,
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
inet_compute_pseudo);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
/* No socket. Drop packet silently, if checksum is wrong */
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
* Hmm. We got an UDP packet to a port to which we
* don't wanna listen. Ignore it.
*/
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source),
ulen, skb->len,
&daddr, ntohs(uh->dest));
goto drop;
csum_error:
/*
* RFC1122: OK. Discards the bad packet silently (as far as
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
ulen);
UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
/* We can only early demux multicast if there is a single matching socket.
* If more than one socket found returns NULL
*/
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(loc_port);
unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
struct udp_hslot *hslot = &udp_table.hash[slot];
/* Do not bother scanning a too big list */
if (hslot->count > 10)
return NULL;
rcu_read_lock();
begin:
count = 0;
result = NULL;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
if (__udp_is_mcast_sock(net, sk,
loc_port, loc_addr,
rmt_port, rmt_addr,
dif, hnum)) {
result = sk;
++count;
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (count != 1 ||
unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(!__udp_is_mcast_sock(net, result,
loc_port, loc_addr,
rmt_port, rmt_addr,
dif, hnum))) {
sock_put(result);
result = NULL;
}
}
rcu_read_unlock();
return result;
}
/* For unicast we should only early demux connected sockets or we can
* break forwarding setups. The chains here can be long so only check
* if the first socket is an exact match and if not move on.
*/
static struct sock *__udp4_lib_demux_lookup(struct net *net,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(loc_port);
unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
unsigned int slot2 = hash2 & udp_table.mask;
struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
rcu_read_lock();
result = NULL;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
if (INET_MATCH(sk, net, acookie,
rmt_addr, loc_addr, ports, dif))
result = sk;
/* Only check first socket in chain */
break;
}
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(!INET_MATCH(sk, net, acookie,
rmt_addr, loc_addr,
ports, dif))) {
sock_put(result);
result = NULL;
}
}
rcu_read_unlock();
return result;
}
void udp_v4_early_demux(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct udphdr *uh;
struct sock *sk;
struct dst_entry *dst;
int dif = skb->dev->ifindex;
int ours;
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
return;
iph = ip_hdr(skb);
uh = udp_hdr(skb);
if (skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) {
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
if (!in_dev)
return;
ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
iph->protocol);
if (!ours)
return;
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
} else if (skb->pkt_type == PACKET_HOST) {
sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
} else {
return;
}
if (!sk)
return;
skb->sk = sk;
skb->destructor = sock_efree;
dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
if (dst) {
/* DST_NOCACHE can not be used without taking a reference */
if (dst->flags & DST_NOCACHE) {
if (likely(atomic_inc_not_zero(&dst->__refcnt)))
skb_dst_set(skb, dst);
} else {
skb_dst_set_noref(skb, dst);
}
}
}
int udp_rcv(struct sk_buff *skb)
{
return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
bool slow = lock_sock_fast(sk);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
}
/*
* Socket option code for UDP
*/
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *))
{
struct udp_sock *up = udp_sk(sk);
int val, valbool;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
switch (optname) {
case UDP_CORK:
if (val != 0) {
up->corkflag = 1;
} else {
up->corkflag = 0;
lock_sock(sk);
push_pending_frames(sk);
release_sock(sk);
}
break;
case UDP_ENCAP:
switch (val) {
case 0:
case UDP_ENCAP_ESPINUDP:
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
udp_encap_enable();
break;
default:
err = -ENOPROTOOPT;
break;
}
break;
case UDP_NO_CHECK6_TX:
up->no_check6_tx = valbool;
break;
case UDP_NO_CHECK6_RX:
up->no_check6_rx = valbool;
break;
/*
* UDP-Lite's partial checksum coverage (RFC 3828).
*/
/* The sender sets actual checksum coverage length via this option.
* The case coverage > packet length is handled by send module. */
case UDPLITE_SEND_CSCOV:
if (!is_udplite) /* Disable the option on UDP sockets */
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
up->pcslen = val;
up->pcflag |= UDPLITE_SEND_CC;
break;
/* The receiver specifies a minimum checksum coverage value. To make
* sense, this should be set to at least 8 (as done below). If zero is
* used, this again means full checksum coverage. */
case UDPLITE_RECV_CSCOV:
if (!is_udplite) /* Disable the option on UDP sockets */
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Avoid silly minimal values. */
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
up->pcrlen = val;
up->pcflag |= UDPLITE_RECV_CC;
break;
default:
err = -ENOPROTOOPT;
break;
}
return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_push_pending_frames);
return ip_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_push_pending_frames);
return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct udp_sock *up = udp_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
switch (optname) {
case UDP_CORK:
val = up->corkflag;
break;
case UDP_ENCAP:
val = up->encap_type;
break;
case UDP_NO_CHECK6_TX:
val = up->no_check6_tx;
break;
case UDP_NO_CHECK6_RX:
val = up->no_check6_rx;
break;
/* The following two cannot be changed on UDP sockets, the return is
* always 0 (which corresponds to the full checksum coverage of UDP). */
case UDPLITE_SEND_CSCOV:
val = up->pcslen;
break;
case UDPLITE_RECV_CSCOV:
val = up->pcrlen;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ip_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
* udp_poll - wait for a UDP event.
* @file - file struct
* @sock - socket
* @wait - poll table
*
* This is same as datagram poll, except for the special case of
* blocking sockets. If application is using a blocking fd
* and a packet with checksum error is in the queue;
* then it could get return from select indicating data available
* but then block when reading it. Add special case code
* to work around these arguably broken applications.
*/
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* Check for false positives due to checksum errors */
if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
mask &= ~(POLLIN | POLLRDNORM);
return mask;
}
EXPORT_SYMBOL(udp_poll);
struct proto udp_prot = {
.name = "UDP",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udp_destroy_sock,
.setsockopt = udp_setsockopt,
.getsockopt = udp_getsockopt,
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
.backlog_rcv = __udp_queue_rcv_skb,
.release_cb = ip4_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
.clear_sk = sk_prot_clear_portaddr_nulls,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static struct sock *udp_get_first(struct seq_file *seq, int start)
{
struct sock *sk;
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
for (state->bucket = start; state->bucket <= state->udp_table->mask;
++state->bucket) {
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
if (hlist_nulls_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
sk_nulls_for_each(sk, node, &hslot->head) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == state->family)
goto found;
}
spin_unlock_bh(&hslot->lock);
}
sk = NULL;
found:
return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
do {
sk = sk_nulls_next(sk);
} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
if (!sk) {
if (state->bucket <= state->udp_table->mask)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
return udp_get_first(seq, state->bucket + 1);
}
return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
struct sock *sk = udp_get_first(seq, 0);
if (sk)
while (pos && (sk = udp_get_next(seq, sk)) != NULL)
--pos;
return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct udp_iter_state *state = seq->private;
state->bucket = MAX_UDP_PORTS;
return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = udp_get_idx(seq, 0);
else
sk = udp_get_next(seq, v);
++*pos;
return sk;
}
static void udp_seq_stop(struct seq_file *seq, void *v)
{
struct udp_iter_state *state = seq->private;
if (state->bucket <= state->udp_table->mask)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
int udp_seq_open(struct inode *inode, struct file *file)
{
struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
struct udp_iter_state *s;
int err;
err = seq_open_net(inode, file, &afinfo->seq_ops,
sizeof(struct udp_iter_state));
if (err < 0)
return err;
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
s->udp_table = afinfo->udp_table;
return err;
}
EXPORT_SYMBOL(udp_seq_open);
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
struct proc_dir_entry *p;
int rc = 0;
afinfo->seq_ops.start = udp_seq_start;
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
return rc;
}
EXPORT_SYMBOL(udp_proc_register);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
int bucket)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
seq_setwidth(seq, 127);
if (v == SEQ_START_TOKEN)
seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops");
else {
struct udp_iter_state *state = seq->private;
udp4_format_sock(v, seq, state->bucket);
}
seq_pad(seq, '\n');
return 0;
}
static const struct file_operations udp_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
.name = "udp",
.family = AF_INET,
.udp_table = &udp_table,
.seq_fops = &udp_afinfo_seq_fops,
.seq_ops = {
.show = udp4_seq_show,
},
};
static int __net_init udp4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udp4_seq_afinfo);
}
static void __net_exit udp4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udp4_seq_afinfo);
}
static struct pernet_operations udp4_net_ops = {
.init = udp4_proc_init_net,
.exit = udp4_proc_exit_net,
};
int __init udp4_proc_init(void)
{
return register_pernet_subsys(&udp4_net_ops);
}
void udp4_proc_exit(void)
{
unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
ssize_t ret;
if (!str)
return 0;
ret = kstrtoul(str, 0, &uhash_entries);
if (ret)
return 0;
if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
uhash_entries = UDP_HTABLE_SIZE_MIN;
return 1;
}
__setup("uhash_entries=", set_uhash_entries);
void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i;
table->hash = alloc_large_system_hash(name,
2 * sizeof(struct udp_hslot),
uhash_entries,
21, /* one slot per 2 MB */
0,
&table->log,
&table->mask,
UDP_HTABLE_SIZE_MIN,
64 * 1024);
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
table->hash[i].count = 0;
spin_lock_init(&table->hash[i].lock);
}
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
table->hash2[i].count = 0;
spin_lock_init(&table->hash2[i].lock);
}
}
u32 udp_flow_hashrnd(void)
{
static u32 hashrnd __read_mostly;
net_get_random_once(&hashrnd, sizeof(hashrnd));
return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
void __init udp_init(void)
{
unsigned long limit;
udp_table_init(&udp_table, "UDP");
limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
sysctl_udp_rmem_min = SK_MEM_QUANTUM;
sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
| ./CrossVul/dataset_final_sorted/CWE-358/c/bad_4847_0 |
crossvul-cpp_data_good_3239_0 | /* Copyright (C) 2007-2012 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include "suricata-common.h"
#include "conf.h"
#include "defrag-hash.h"
#include "defrag-queue.h"
#include "defrag-config.h"
#include "util-random.h"
#include "util-byte.h"
#include "util-misc.h"
#include "util-hash-lookup3.h"
static DefragTracker *DefragTrackerGetUsedDefragTracker(void);
/** queue with spare tracker */
static DefragTrackerQueue defragtracker_spare_q;
uint32_t DefragTrackerSpareQueueGetSize(void)
{
return DefragTrackerQueueLen(&defragtracker_spare_q);
}
void DefragTrackerMoveToSpare(DefragTracker *h)
{
DefragTrackerEnqueue(&defragtracker_spare_q, h);
(void) SC_ATOMIC_SUB(defragtracker_counter, 1);
}
DefragTracker *DefragTrackerAlloc(void)
{
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
return NULL;
}
(void) SC_ATOMIC_ADD(defrag_memuse, sizeof(DefragTracker));
DefragTracker *dt = SCMalloc(sizeof(DefragTracker));
if (unlikely(dt == NULL))
goto error;
memset(dt, 0x00, sizeof(DefragTracker));
SCMutexInit(&dt->lock, NULL);
SC_ATOMIC_INIT(dt->use_cnt);
return dt;
error:
return NULL;
}
void DefragTrackerFree(DefragTracker *dt)
{
if (dt != NULL) {
DefragTrackerClearMemory(dt);
SCMutexDestroy(&dt->lock);
SCFree(dt);
(void) SC_ATOMIC_SUB(defrag_memuse, sizeof(DefragTracker));
}
}
#define DefragTrackerIncrUsecnt(dt) \
SC_ATOMIC_ADD((dt)->use_cnt, 1)
#define DefragTrackerDecrUsecnt(dt) \
SC_ATOMIC_SUB((dt)->use_cnt, 1)
static void DefragTrackerInit(DefragTracker *dt, Packet *p)
{
/* copy address */
COPY_ADDRESS(&p->src, &dt->src_addr);
COPY_ADDRESS(&p->dst, &dt->dst_addr);
if (PKT_IS_IPV4(p)) {
dt->id = (int32_t)IPV4_GET_IPID(p);
dt->af = AF_INET;
} else {
dt->id = (int32_t)IPV6_EXTHDR_GET_FH_ID(p);
dt->af = AF_INET6;
}
dt->proto = IP_GET_IPPROTO(p);
dt->vlan_id[0] = p->vlan_id[0];
dt->vlan_id[1] = p->vlan_id[1];
dt->policy = DefragGetOsPolicy(p);
dt->host_timeout = DefragPolicyGetHostTimeout(p);
dt->remove = 0;
dt->seen_last = 0;
TAILQ_INIT(&dt->frags);
(void) DefragTrackerIncrUsecnt(dt);
}
void DefragTrackerRelease(DefragTracker *t)
{
(void) DefragTrackerDecrUsecnt(t);
SCMutexUnlock(&t->lock);
}
void DefragTrackerClearMemory(DefragTracker *dt)
{
DefragTrackerFreeFrags(dt);
SC_ATOMIC_DESTROY(dt->use_cnt);
}
#define DEFRAG_DEFAULT_HASHSIZE 4096
#define DEFRAG_DEFAULT_MEMCAP 16777216
#define DEFRAG_DEFAULT_PREALLOC 1000
/** \brief initialize the configuration
* \warning Not thread safe */
void DefragInitConfig(char quiet)
{
SCLogDebug("initializing defrag engine...");
memset(&defrag_config, 0, sizeof(defrag_config));
//SC_ATOMIC_INIT(flow_flags);
SC_ATOMIC_INIT(defragtracker_counter);
SC_ATOMIC_INIT(defrag_memuse);
SC_ATOMIC_INIT(defragtracker_prune_idx);
DefragTrackerQueueInit(&defragtracker_spare_q);
#ifndef AFLFUZZ_NO_RANDOM
unsigned int seed = RandomTimePreseed();
/* set defaults */
defrag_config.hash_rand = (int)(DEFRAG_DEFAULT_HASHSIZE * (rand_r(&seed) / RAND_MAX + 1.0));
#endif
defrag_config.hash_size = DEFRAG_DEFAULT_HASHSIZE;
defrag_config.memcap = DEFRAG_DEFAULT_MEMCAP;
defrag_config.prealloc = DEFRAG_DEFAULT_PREALLOC;
/* Check if we have memcap and hash_size defined at config */
char *conf_val;
uint32_t configval = 0;
/** set config values for memcap, prealloc and hash_size */
if ((ConfGet("defrag.memcap", &conf_val)) == 1)
{
if (ParseSizeStringU64(conf_val, &defrag_config.memcap) < 0) {
SCLogError(SC_ERR_SIZE_PARSE, "Error parsing defrag.memcap "
"from conf file - %s. Killing engine",
conf_val);
exit(EXIT_FAILURE);
}
}
if ((ConfGet("defrag.hash-size", &conf_val)) == 1)
{
if (ByteExtractStringUint32(&configval, 10, strlen(conf_val),
conf_val) > 0) {
defrag_config.hash_size = configval;
} else {
WarnInvalidConfEntry("defrag.hash-size", "%"PRIu32, defrag_config.hash_size);
}
}
if ((ConfGet("defrag.trackers", &conf_val)) == 1)
{
if (ByteExtractStringUint32(&configval, 10, strlen(conf_val),
conf_val) > 0) {
defrag_config.prealloc = configval;
} else {
WarnInvalidConfEntry("defrag.trackers", "%"PRIu32, defrag_config.prealloc);
}
}
SCLogDebug("DefragTracker config from suricata.yaml: memcap: %"PRIu64", hash-size: "
"%"PRIu32", prealloc: %"PRIu32, defrag_config.memcap,
defrag_config.hash_size, defrag_config.prealloc);
/* alloc hash memory */
uint64_t hash_size = defrag_config.hash_size * sizeof(DefragTrackerHashRow);
if (!(DEFRAG_CHECK_MEMCAP(hash_size))) {
SCLogError(SC_ERR_DEFRAG_INIT, "allocating defrag hash failed: "
"max defrag memcap is smaller than projected hash size. "
"Memcap: %"PRIu64", Hash table size %"PRIu64". Calculate "
"total hash size by multiplying \"defrag.hash-size\" with %"PRIuMAX", "
"which is the hash bucket size.", defrag_config.memcap, hash_size,
(uintmax_t)sizeof(DefragTrackerHashRow));
exit(EXIT_FAILURE);
}
defragtracker_hash = SCCalloc(defrag_config.hash_size, sizeof(DefragTrackerHashRow));
if (unlikely(defragtracker_hash == NULL)) {
SCLogError(SC_ERR_FATAL, "Fatal error encountered in DefragTrackerInitConfig. Exiting...");
exit(EXIT_FAILURE);
}
memset(defragtracker_hash, 0, defrag_config.hash_size * sizeof(DefragTrackerHashRow));
uint32_t i = 0;
for (i = 0; i < defrag_config.hash_size; i++) {
DRLOCK_INIT(&defragtracker_hash[i]);
}
(void) SC_ATOMIC_ADD(defrag_memuse, (defrag_config.hash_size * sizeof(DefragTrackerHashRow)));
if (quiet == FALSE) {
SCLogConfig("allocated %llu bytes of memory for the defrag hash... "
"%" PRIu32 " buckets of size %" PRIuMAX "",
SC_ATOMIC_GET(defrag_memuse), defrag_config.hash_size,
(uintmax_t)sizeof(DefragTrackerHashRow));
}
if ((ConfGet("defrag.prealloc", &conf_val)) == 1)
{
if (ConfValIsTrue(conf_val)) {
/* pre allocate defrag trackers */
for (i = 0; i < defrag_config.prealloc; i++) {
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
SCLogError(SC_ERR_DEFRAG_INIT, "preallocating defrag trackers failed: "
"max defrag memcap reached. Memcap %"PRIu64", "
"Memuse %"PRIu64".", defrag_config.memcap,
((uint64_t)SC_ATOMIC_GET(defrag_memuse) + (uint64_t)sizeof(DefragTracker)));
exit(EXIT_FAILURE);
}
DefragTracker *h = DefragTrackerAlloc();
if (h == NULL) {
SCLogError(SC_ERR_DEFRAG_INIT, "preallocating defrag failed: %s", strerror(errno));
exit(EXIT_FAILURE);
}
DefragTrackerEnqueue(&defragtracker_spare_q,h);
}
if (quiet == FALSE) {
SCLogConfig("preallocated %" PRIu32 " defrag trackers of size %" PRIuMAX "",
defragtracker_spare_q.len, (uintmax_t)sizeof(DefragTracker));
}
}
}
if (quiet == FALSE) {
SCLogConfig("defrag memory usage: %llu bytes, maximum: %"PRIu64,
SC_ATOMIC_GET(defrag_memuse), defrag_config.memcap);
}
return;
}
/** \brief print some defrag stats
* \warning Not thread safe */
static void DefragTrackerPrintStats (void)
{
}
/** \brief shutdown the flow engine
* \warning Not thread safe */
void DefragHashShutdown(void)
{
DefragTracker *dt;
uint32_t u;
DefragTrackerPrintStats();
/* free spare queue */
while((dt = DefragTrackerDequeue(&defragtracker_spare_q))) {
BUG_ON(SC_ATOMIC_GET(dt->use_cnt) > 0);
DefragTrackerFree(dt);
}
/* clear and free the hash */
if (defragtracker_hash != NULL) {
for (u = 0; u < defrag_config.hash_size; u++) {
dt = defragtracker_hash[u].head;
while (dt) {
DefragTracker *n = dt->hnext;
DefragTrackerClearMemory(dt);
DefragTrackerFree(dt);
dt = n;
}
DRLOCK_DESTROY(&defragtracker_hash[u]);
}
SCFree(defragtracker_hash);
defragtracker_hash = NULL;
}
(void) SC_ATOMIC_SUB(defrag_memuse, defrag_config.hash_size * sizeof(DefragTrackerHashRow));
DefragTrackerQueueDestroy(&defragtracker_spare_q);
SC_ATOMIC_DESTROY(defragtracker_prune_idx);
SC_ATOMIC_DESTROY(defrag_memuse);
SC_ATOMIC_DESTROY(defragtracker_counter);
//SC_ATOMIC_DESTROY(flow_flags);
return;
}
/** \brief compare two raw ipv6 addrs
*
* \note we don't care about the real ipv6 ip's, this is just
* to consistently fill the DefragHashKey6 struct, without all
* the ntohl calls.
*
* \warning do not use elsewhere unless you know what you're doing.
* detect-engine-address-ipv6.c's AddressIPv6GtU32 is likely
* what you are looking for.
*/
static inline int DefragHashRawAddressIPv6GtU32(uint32_t *a, uint32_t *b)
{
int i;
for (i = 0; i < 4; i++) {
if (a[i] > b[i])
return 1;
if (a[i] < b[i])
break;
}
return 0;
}
typedef struct DefragHashKey4_ {
union {
struct {
uint32_t src, dst;
uint32_t id;
uint16_t vlan_id[2];
};
uint32_t u32[4];
};
} DefragHashKey4;
typedef struct DefragHashKey6_ {
union {
struct {
uint32_t src[4], dst[4];
uint32_t id;
uint16_t vlan_id[2];
};
uint32_t u32[10];
};
} DefragHashKey6;
/* calculate the hash key for this packet
*
* we're using:
* hash_rand -- set at init time
* source address
* destination address
* id
* vlan_id
*/
static inline uint32_t DefragHashGetKey(Packet *p)
{
uint32_t key;
if (p->ip4h != NULL) {
DefragHashKey4 dhk;
if (p->src.addr_data32[0] > p->dst.addr_data32[0]) {
dhk.src = p->src.addr_data32[0];
dhk.dst = p->dst.addr_data32[0];
} else {
dhk.src = p->dst.addr_data32[0];
dhk.dst = p->src.addr_data32[0];
}
dhk.id = (uint32_t)IPV4_GET_IPID(p);
dhk.vlan_id[0] = p->vlan_id[0];
dhk.vlan_id[1] = p->vlan_id[1];
uint32_t hash = hashword(dhk.u32, 4, defrag_config.hash_rand);
key = hash % defrag_config.hash_size;
} else if (p->ip6h != NULL) {
DefragHashKey6 dhk;
if (DefragHashRawAddressIPv6GtU32(p->src.addr_data32, p->dst.addr_data32)) {
dhk.src[0] = p->src.addr_data32[0];
dhk.src[1] = p->src.addr_data32[1];
dhk.src[2] = p->src.addr_data32[2];
dhk.src[3] = p->src.addr_data32[3];
dhk.dst[0] = p->dst.addr_data32[0];
dhk.dst[1] = p->dst.addr_data32[1];
dhk.dst[2] = p->dst.addr_data32[2];
dhk.dst[3] = p->dst.addr_data32[3];
} else {
dhk.src[0] = p->dst.addr_data32[0];
dhk.src[1] = p->dst.addr_data32[1];
dhk.src[2] = p->dst.addr_data32[2];
dhk.src[3] = p->dst.addr_data32[3];
dhk.dst[0] = p->src.addr_data32[0];
dhk.dst[1] = p->src.addr_data32[1];
dhk.dst[2] = p->src.addr_data32[2];
dhk.dst[3] = p->src.addr_data32[3];
}
dhk.id = IPV6_EXTHDR_GET_FH_ID(p);
dhk.vlan_id[0] = p->vlan_id[0];
dhk.vlan_id[1] = p->vlan_id[1];
uint32_t hash = hashword(dhk.u32, 10, defrag_config.hash_rand);
key = hash % defrag_config.hash_size;
} else
key = 0;
return key;
}
/* Since two or more trackers can have the same hash key, we need to compare
* the tracker with the current tracker key. */
#define CMP_DEFRAGTRACKER(d1,d2,id) \
(((CMP_ADDR(&(d1)->src_addr, &(d2)->src) && \
CMP_ADDR(&(d1)->dst_addr, &(d2)->dst)) || \
(CMP_ADDR(&(d1)->src_addr, &(d2)->dst) && \
CMP_ADDR(&(d1)->dst_addr, &(d2)->src))) && \
(d1)->proto == IP_GET_IPPROTO(p) && \
(d1)->id == (id) && \
(d1)->vlan_id[0] == (d2)->vlan_id[0] && \
(d1)->vlan_id[1] == (d2)->vlan_id[1])
static inline int DefragTrackerCompare(DefragTracker *t, Packet *p)
{
uint32_t id;
if (PKT_IS_IPV4(p)) {
id = (uint32_t)IPV4_GET_IPID(p);
} else {
id = IPV6_EXTHDR_GET_FH_ID(p);
}
return CMP_DEFRAGTRACKER(t, p, id);
}
/**
* \brief Get a new defrag tracker
*
* Get a new defrag tracker. We're checking memcap first and will try to make room
* if the memcap is reached.
*
* \retval dt *LOCKED* tracker on succes, NULL on error.
*/
static DefragTracker *DefragTrackerGetNew(Packet *p)
{
DefragTracker *dt = NULL;
/* get a tracker from the spare queue */
dt = DefragTrackerDequeue(&defragtracker_spare_q);
if (dt == NULL) {
/* If we reached the max memcap, we get a used tracker */
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
/* declare state of emergency */
//if (!(SC_ATOMIC_GET(defragtracker_flags) & DEFRAG_EMERGENCY)) {
// SC_ATOMIC_OR(defragtracker_flags, DEFRAG_EMERGENCY);
/* under high load, waking up the flow mgr each time leads
* to high cpu usage. Flows are not timed out much faster if
* we check a 1000 times a second. */
// FlowWakeupFlowManagerThread();
//}
dt = DefragTrackerGetUsedDefragTracker();
if (dt == NULL) {
return NULL;
}
/* freed a tracker, but it's unlocked */
} else {
/* now see if we can alloc a new tracker */
dt = DefragTrackerAlloc();
if (dt == NULL) {
return NULL;
}
/* tracker is initialized but *unlocked* */
}
} else {
/* tracker has been recycled before it went into the spare queue */
/* tracker is initialized (recylced) but *unlocked* */
}
(void) SC_ATOMIC_ADD(defragtracker_counter, 1);
SCMutexLock(&dt->lock);
return dt;
}
/* DefragGetTrackerFromHash
*
* Hash retrieval function for trackers. Looks up the hash bucket containing the
* tracker pointer. Then compares the packet with the found tracker to see if it is
* the tracker we need. If it isn't, walk the list until the right tracker is found.
*
* returns a *LOCKED* tracker or NULL
*/
DefragTracker *DefragGetTrackerFromHash (Packet *p)
{
DefragTracker *dt = NULL;
/* get the key to our bucket */
uint32_t key = DefragHashGetKey(p);
/* get our hash bucket and lock it */
DefragTrackerHashRow *hb = &defragtracker_hash[key];
DRLOCK_LOCK(hb);
/* see if the bucket already has a tracker */
if (hb->head == NULL) {
dt = DefragTrackerGetNew(p);
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
return NULL;
}
/* tracker is locked */
hb->head = dt;
hb->tail = dt;
/* got one, now lock, initialize and return */
DefragTrackerInit(dt,p);
DRLOCK_UNLOCK(hb);
return dt;
}
/* ok, we have a tracker in the bucket. Let's find out if it is our tracker */
dt = hb->head;
/* see if this is the tracker we are looking for */
if (dt->remove || DefragTrackerCompare(dt, p) == 0) {
DefragTracker *pdt = NULL; /* previous tracker */
while (dt) {
pdt = dt;
dt = dt->hnext;
if (dt == NULL) {
dt = pdt->hnext = DefragTrackerGetNew(p);
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
return NULL;
}
hb->tail = dt;
/* tracker is locked */
dt->hprev = pdt;
/* initialize and return */
DefragTrackerInit(dt,p);
DRLOCK_UNLOCK(hb);
return dt;
}
if (DefragTrackerCompare(dt, p) != 0) {
/* we found our tracker, lets put it on top of the
* hash list -- this rewards active trackers */
if (dt->hnext) {
dt->hnext->hprev = dt->hprev;
}
if (dt->hprev) {
dt->hprev->hnext = dt->hnext;
}
if (dt == hb->tail) {
hb->tail = dt->hprev;
}
dt->hnext = hb->head;
dt->hprev = NULL;
hb->head->hprev = dt;
hb->head = dt;
/* found our tracker, lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
}
}
/* lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
/** \brief look up a tracker in the hash
*
* \param a address to look up
*
* \retval h *LOCKED* tracker or NULL
*/
DefragTracker *DefragLookupTrackerFromHash (Packet *p)
{
DefragTracker *dt = NULL;
/* get the key to our bucket */
uint32_t key = DefragHashGetKey(p);
/* get our hash bucket and lock it */
DefragTrackerHashRow *hb = &defragtracker_hash[key];
DRLOCK_LOCK(hb);
/* see if the bucket already has a tracker */
if (hb->head == NULL) {
DRLOCK_UNLOCK(hb);
return dt;
}
/* ok, we have a tracker in the bucket. Let's find out if it is our tracker */
dt = hb->head;
/* see if this is the tracker we are looking for */
if (DefragTrackerCompare(dt, p) == 0) {
while (dt) {
dt = dt->hnext;
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
return dt;
}
if (DefragTrackerCompare(dt, p) != 0) {
/* we found our tracker, lets put it on top of the
* hash list -- this rewards active tracker */
if (dt->hnext) {
dt->hnext->hprev = dt->hprev;
}
if (dt->hprev) {
dt->hprev->hnext = dt->hnext;
}
if (dt == hb->tail) {
hb->tail = dt->hprev;
}
dt->hnext = hb->head;
dt->hprev = NULL;
hb->head->hprev = dt;
hb->head = dt;
/* found our tracker, lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
}
}
/* lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
/** \internal
* \brief Get a tracker from the hash directly.
*
* Called in conditions where the spare queue is empty and memcap is reached.
*
* Walks the hash until a tracker can be freed. "defragtracker_prune_idx" atomic int makes
* sure we don't start at the top each time since that would clear the top of
* the hash leading to longer and longer search times under high pressure (observed).
*
* \retval dt tracker or NULL
*/
static DefragTracker *DefragTrackerGetUsedDefragTracker(void)
{
uint32_t idx = SC_ATOMIC_GET(defragtracker_prune_idx) % defrag_config.hash_size;
uint32_t cnt = defrag_config.hash_size;
while (cnt--) {
if (++idx >= defrag_config.hash_size)
idx = 0;
DefragTrackerHashRow *hb = &defragtracker_hash[idx];
if (DRLOCK_TRYLOCK(hb) != 0)
continue;
DefragTracker *dt = hb->tail;
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
continue;
}
if (SCMutexTrylock(&dt->lock) != 0) {
DRLOCK_UNLOCK(hb);
continue;
}
/** never prune a tracker that is used by a packets
* we are currently processing in one of the threads */
if (SC_ATOMIC_GET(dt->use_cnt) > 0) {
DRLOCK_UNLOCK(hb);
SCMutexUnlock(&dt->lock);
continue;
}
/* remove from the hash */
if (dt->hprev != NULL)
dt->hprev->hnext = dt->hnext;
if (dt->hnext != NULL)
dt->hnext->hprev = dt->hprev;
if (hb->head == dt)
hb->head = dt->hnext;
if (hb->tail == dt)
hb->tail = dt->hprev;
dt->hnext = NULL;
dt->hprev = NULL;
DRLOCK_UNLOCK(hb);
DefragTrackerClearMemory(dt);
SCMutexUnlock(&dt->lock);
(void) SC_ATOMIC_ADD(defragtracker_prune_idx, (defrag_config.hash_size - cnt));
return dt;
}
return NULL;
}
| ./CrossVul/dataset_final_sorted/CWE-358/c/good_3239_0 |
crossvul-cpp_data_bad_4847_1 | /*
* UDP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/ipv4/udp.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
const struct in6_addr *faddr,
const __be16 fport)
{
static u32 udp6_ehash_secret __read_mostly;
static u32 udp_ipv6_hash_secret __read_mostly;
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
sizeof(udp6_ehash_secret));
net_get_random_once(&udp_ipv6_hash_secret,
sizeof(udp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
udp_ipv6_hash_secret + net_hash_mix(net));
}
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
return (!sk2_ipv6only &&
(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
!(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
return 1;
if (sk2_rcv_saddr6 &&
ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
return 1;
return 0;
}
static u32 udp6_portaddr_hash(const struct net *net,
const struct in6_addr *addr6,
unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = udp6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
score = 0;
inet = inet_sk(sk);
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
static inline int compute_score2(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr,
unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score = 0;
inet = inet_sk(sk);
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
/* called with read_rcu_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = -1;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, hnum, saddr, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
struct sock *sk;
const struct ipv6hdr *iph = ipv6_hdr(skb);
sk = skb_steal_sock(skb);
if (unlikely(sk))
return sk;
return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
udptable);
}
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
*/
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
bool slow;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
}
goto out_free;
}
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
}
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
sin6->sin6_family = AF_INET6;
sin6->sin6_port = udp_hdr(skb)->source;
sin6->sin6_flowinfo = 0;
if (is_udp4) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
sin6->sin6_scope_id = 0;
} else {
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
inet6_iif(skb));
}
*addr_len = sizeof(*sin6);
}
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
}
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
} else {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
}
unlock_sock_fast(sk, slow);
/* starting over for a new packet, but check if we need to yield */
cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info,
struct udp_table *udptable)
{
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
struct sock *sk;
int err;
struct net *net = dev_net(skb->dev);
sk = __udp6_lib_lookup(net, daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (type == ICMPV6_PKT_TOOBIG) {
if (!ip6_sk_accept_pmtu(sk))
goto out;
ip6_sk_update_pmtu(skb, sk, info);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
goto out;
}
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
}
return 0;
}
static __inline__ void udpv6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt, u8 type,
u8 code, int offset, __be32 info)
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
if (!static_key_enabled(&udpv6_encap_needed))
static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
if (udp_lib_checksum_complete(skb))
goto csum_error;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
}
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
}
skb_dst_drop(skb);
bh_lock_sock(sk);
rc = 0;
if (!sock_owned_by_user(sk))
rc = __udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
int dif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
return false;
if (udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6 ||
(inet->inet_dport && inet->inet_dport != rmt_port) ||
(!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
(!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
return false;
if (!inet6_mc_check(sk, loc_addr, rmt_addr))
return false;
return true;
}
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
struct sk_buff *skb1 = NULL;
struct sock *sk;
unsigned int i;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
sock_put(sk);
}
if (unlikely(skb1))
kfree_skb(skb1);
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
/* RFC 2460 section 8.1 says that we SHOULD log
* this error. Well, it is reasonable.
*/
net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
&ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
&ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
* Note: called only from the BH handler context,
* so we don't need to lock the hashes.
*/
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
struct udp_table *udptable, int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = inet6_iif(skb);
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
udp_table.mask;
hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
hslot = &udp_table.hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
spin_lock(&hslot->lock);
sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
if (__udp_v6_is_mcast_sock(net, sk,
uh->dest, daddr,
uh->source, saddr,
dif, hnum) &&
/* If zero checksum and no_check is not on for
* the socket then skip it.
*/
(uh->check || udp_sk(sk)->no_check6_rx)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
inner_flushed = true;
count = 0;
}
stack[count++] = sk;
sock_hold(sk);
}
}
spin_unlock(&hslot->lock);
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
const struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable, proto);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
sock_put(sk);
udp6_csum_zero_error(skb);
goto csum_error;
}
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
ip6_compute_pseudo);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (!uh->check) {
udp6_csum_zero_error(skb);
goto csum_error;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr, ntohs(uh->source),
ulen, skb->len,
daddr, ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
static void udp_v6_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET)
udp_flush_pending_frames(sk);
else if (up->pending) {
up->len = 0;
up->pending = 0;
ip6_flush_pending_frames(sk);
}
}
/**
* udp6_hwcsum_outgoing - handle outgoing HW checksumming
* @sk: socket we are sending on
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
*/
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr, int len)
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
struct sk_buff *frags = skb_shinfo(skb)->frag_list;
__wsum csum = 0;
if (!frags) {
/* Only one fragment on the socket. */
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
} else {
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb->ip_summed = CHECKSUM_NONE;
do {
csum = csum_add(csum, frags->csum);
} while ((frags = frags->next));
uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
/*
* Sending
*/
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
{
struct sock *sk = skb->sk;
struct udphdr *uh;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum(skb);
else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
goto send;
} else
csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_send_skb(skb);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
static int udp_v6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
struct udp_sock *up = udp_sk(sk);
struct flowi6 fl6;
int err = 0;
if (up->pending == AF_INET)
return udp_push_pending_frames(sk);
/* ip6_finish_skb will release the cork, so make a copy of
* fl6 here.
*/
fl6 = inet_sk(sk)->cork.fl.u.ip6;
skb = ip6_finish_skb(sk);
if (!skb)
goto out;
err = udp_v6_send_skb(skb, &fl6);
out:
up->len = 0;
up->pending = 0;
return err;
}
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
struct ipv6_txoptions *opt_to_free = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_len = msg->msg_namelen;
int ulen = len;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (sin6) {
if (addr_len < offsetof(struct sockaddr, sa_data))
return -EINVAL;
switch (sin6->sin6_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
break;
case AF_INET:
goto do_udp_sendmsg;
case AF_UNSPEC:
msg->msg_name = sin6 = NULL;
msg->msg_namelen = addr_len = 0;
daddr = NULL;
break;
default:
return -EINVAL;
}
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
} else
daddr = NULL;
if (daddr) {
if (ipv6_addr_v4mapped(daddr)) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(sk, msg, len);
}
}
if (up->pending == AF_INET)
return udp_sendmsg(sk, msg, len);
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET6)) {
release_sock(sk);
return -EAFNOSUPPORT;
}
dst = NULL;
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
memset(&fl6, 0, sizeof(fl6));
if (sin6) {
if (sin6->sin6_port == 0)
return -EINVAL;
fl6.fl6_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl6.fl6_dport = inet->inet_dport;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
connected = 1;
}
if (!fl6.flowi6_oif)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
if (!opt) {
opt = txopt_get(np);
opt_to_free = opt;
}
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = 0;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
fl6.flowi6_oif = np->mcast_oif;
connected = 0;
} else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto out;
}
if (hlimit < 0)
hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (tclass < 0)
tclass = np->tclass;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
/* Lockless fast path for the non-corking case */
if (!corkreq) {
struct sk_buff *skb;
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_v6_send_skb(skb, &fl6);
goto release_dst;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
up->pending = AF_INET6;
do_append_data:
if (dontfrag < 0)
dontfrag = np->dontfrag;
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info *)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
release_sock(sk);
release_dst:
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
&sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
#endif
NULL);
} else {
dst_release(dst);
}
dst = NULL;
}
out:
dst_release(dst);
fl6_sock_release(flowlabel);
txopt_put(opt_to_free);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
inet6_destroy_sock(sk);
}
/*
* Socket option code for UDP
*/
int udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
int bucket = ((struct udp_iter_state *)seq->private)->bucket;
struct inet_sock *inet = inet_sk(v);
__u16 srcp = ntohs(inet->inet_sport);
__u16 destp = ntohs(inet->inet_dport);
ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
}
return 0;
}
static const struct file_operations udp6_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
static struct udp_seq_afinfo udp6_seq_afinfo = {
.name = "udp6",
.family = AF_INET6,
.udp_table = &udp_table,
.seq_fops = &udp6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
};
int __net_init udp6_proc_init(struct net *net)
{
return udp_proc_register(net, &udp6_seq_afinfo);
}
void udp6_proc_exit(struct net *net)
{
udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
void udp_v6_clear_sk(struct sock *sk, int size)
{
struct inet_sock *inet = inet_sk(sk);
/* we do not want to clear pinet6 field, because of RCU lookups */
sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
memset(&inet->pinet6 + 1, 0, size);
}
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
.backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
.clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udpv6_prot,
.ops = &inet6_dgram_ops,
.flags = INET_PROTOSW_PERMANENT,
};
int __init udpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
ret = inet6_register_protosw(&udpv6_protosw);
if (ret)
goto out_udpv6_protocol;
out:
return ret;
out_udpv6_protocol:
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
| ./CrossVul/dataset_final_sorted/CWE-358/c/bad_4847_1 |
crossvul-cpp_data_good_4847_1 | /*
* UDP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/ipv4/udp.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
const struct in6_addr *faddr,
const __be16 fport)
{
static u32 udp6_ehash_secret __read_mostly;
static u32 udp_ipv6_hash_secret __read_mostly;
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
sizeof(udp6_ehash_secret));
net_get_random_once(&udp_ipv6_hash_secret,
sizeof(udp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
udp_ipv6_hash_secret + net_hash_mix(net));
}
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
return (!sk2_ipv6only &&
(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
!(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
return 1;
if (sk2_rcv_saddr6 &&
ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
return 1;
return 0;
}
static u32 udp6_portaddr_hash(const struct net *net,
const struct in6_addr *addr6,
unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = udp6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
score = 0;
inet = inet_sk(sk);
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
static inline int compute_score2(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr,
unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score = 0;
inet = inet_sk(sk);
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
/* called with read_rcu_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = -1;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, hnum, saddr, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
struct sock *sk;
const struct ipv6hdr *iph = ipv6_hdr(skb);
sk = skb_steal_sock(skb);
if (unlikely(sk))
return sk;
return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
udptable);
}
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
*/
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
int is_udp4;
bool slow;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
checksum_valid = !udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
if (checksum_valid || skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
}
goto out_free;
}
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
}
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
sin6->sin6_family = AF_INET6;
sin6->sin6_port = udp_hdr(skb)->source;
sin6->sin6_flowinfo = 0;
if (is_udp4) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
sin6->sin6_scope_id = 0;
} else {
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
inet6_iif(skb));
}
*addr_len = sizeof(*sin6);
}
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
}
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
} else {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
}
unlock_sock_fast(sk, slow);
/* starting over for a new packet, but check if we need to yield */
cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info,
struct udp_table *udptable)
{
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
struct sock *sk;
int err;
struct net *net = dev_net(skb->dev);
sk = __udp6_lib_lookup(net, daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (type == ICMPV6_PKT_TOOBIG) {
if (!ip6_sk_accept_pmtu(sk))
goto out;
ip6_sk_update_pmtu(skb, sk, info);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
goto out;
}
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
}
return 0;
}
static __inline__ void udpv6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt, u8 type,
u8 code, int offset, __be32 info)
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
if (!static_key_enabled(&udpv6_encap_needed))
static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
if (udp_lib_checksum_complete(skb))
goto csum_error;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
}
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
}
skb_dst_drop(skb);
bh_lock_sock(sk);
rc = 0;
if (!sock_owned_by_user(sk))
rc = __udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
int dif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
return false;
if (udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6 ||
(inet->inet_dport && inet->inet_dport != rmt_port) ||
(!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
(!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
return false;
if (!inet6_mc_check(sk, loc_addr, rmt_addr))
return false;
return true;
}
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
struct sk_buff *skb1 = NULL;
struct sock *sk;
unsigned int i;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
sock_put(sk);
}
if (unlikely(skb1))
kfree_skb(skb1);
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
/* RFC 2460 section 8.1 says that we SHOULD log
* this error. Well, it is reasonable.
*/
net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
&ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
&ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
* Note: called only from the BH handler context,
* so we don't need to lock the hashes.
*/
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
struct udp_table *udptable, int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = inet6_iif(skb);
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
udp_table.mask;
hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
hslot = &udp_table.hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
spin_lock(&hslot->lock);
sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
if (__udp_v6_is_mcast_sock(net, sk,
uh->dest, daddr,
uh->source, saddr,
dif, hnum) &&
/* If zero checksum and no_check is not on for
* the socket then skip it.
*/
(uh->check || udp_sk(sk)->no_check6_rx)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
inner_flushed = true;
count = 0;
}
stack[count++] = sk;
sock_hold(sk);
}
}
spin_unlock(&hslot->lock);
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
const struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable, proto);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
sock_put(sk);
udp6_csum_zero_error(skb);
goto csum_error;
}
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
ip6_compute_pseudo);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (!uh->check) {
udp6_csum_zero_error(skb);
goto csum_error;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr, ntohs(uh->source),
ulen, skb->len,
daddr, ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
static void udp_v6_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET)
udp_flush_pending_frames(sk);
else if (up->pending) {
up->len = 0;
up->pending = 0;
ip6_flush_pending_frames(sk);
}
}
/**
* udp6_hwcsum_outgoing - handle outgoing HW checksumming
* @sk: socket we are sending on
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
*/
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr, int len)
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
struct sk_buff *frags = skb_shinfo(skb)->frag_list;
__wsum csum = 0;
if (!frags) {
/* Only one fragment on the socket. */
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
} else {
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb->ip_summed = CHECKSUM_NONE;
do {
csum = csum_add(csum, frags->csum);
} while ((frags = frags->next));
uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
/*
* Sending
*/
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
{
struct sock *sk = skb->sk;
struct udphdr *uh;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum(skb);
else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
goto send;
} else
csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_send_skb(skb);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
static int udp_v6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
struct udp_sock *up = udp_sk(sk);
struct flowi6 fl6;
int err = 0;
if (up->pending == AF_INET)
return udp_push_pending_frames(sk);
/* ip6_finish_skb will release the cork, so make a copy of
* fl6 here.
*/
fl6 = inet_sk(sk)->cork.fl.u.ip6;
skb = ip6_finish_skb(sk);
if (!skb)
goto out;
err = udp_v6_send_skb(skb, &fl6);
out:
up->len = 0;
up->pending = 0;
return err;
}
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
struct ipv6_txoptions *opt_to_free = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_len = msg->msg_namelen;
int ulen = len;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (sin6) {
if (addr_len < offsetof(struct sockaddr, sa_data))
return -EINVAL;
switch (sin6->sin6_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
break;
case AF_INET:
goto do_udp_sendmsg;
case AF_UNSPEC:
msg->msg_name = sin6 = NULL;
msg->msg_namelen = addr_len = 0;
daddr = NULL;
break;
default:
return -EINVAL;
}
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
} else
daddr = NULL;
if (daddr) {
if (ipv6_addr_v4mapped(daddr)) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(sk, msg, len);
}
}
if (up->pending == AF_INET)
return udp_sendmsg(sk, msg, len);
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET6)) {
release_sock(sk);
return -EAFNOSUPPORT;
}
dst = NULL;
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
memset(&fl6, 0, sizeof(fl6));
if (sin6) {
if (sin6->sin6_port == 0)
return -EINVAL;
fl6.fl6_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl6.fl6_dport = inet->inet_dport;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
connected = 1;
}
if (!fl6.flowi6_oif)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
if (!opt) {
opt = txopt_get(np);
opt_to_free = opt;
}
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = 0;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
fl6.flowi6_oif = np->mcast_oif;
connected = 0;
} else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto out;
}
if (hlimit < 0)
hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (tclass < 0)
tclass = np->tclass;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
/* Lockless fast path for the non-corking case */
if (!corkreq) {
struct sk_buff *skb;
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_v6_send_skb(skb, &fl6);
goto release_dst;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
up->pending = AF_INET6;
do_append_data:
if (dontfrag < 0)
dontfrag = np->dontfrag;
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info *)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
release_sock(sk);
release_dst:
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
&sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
#endif
NULL);
} else {
dst_release(dst);
}
dst = NULL;
}
out:
dst_release(dst);
fl6_sock_release(flowlabel);
txopt_put(opt_to_free);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
inet6_destroy_sock(sk);
}
/*
* Socket option code for UDP
*/
int udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
int bucket = ((struct udp_iter_state *)seq->private)->bucket;
struct inet_sock *inet = inet_sk(v);
__u16 srcp = ntohs(inet->inet_sport);
__u16 destp = ntohs(inet->inet_dport);
ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
}
return 0;
}
static const struct file_operations udp6_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
static struct udp_seq_afinfo udp6_seq_afinfo = {
.name = "udp6",
.family = AF_INET6,
.udp_table = &udp_table,
.seq_fops = &udp6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
};
int __net_init udp6_proc_init(struct net *net)
{
return udp_proc_register(net, &udp6_seq_afinfo);
}
void udp6_proc_exit(struct net *net)
{
udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
void udp_v6_clear_sk(struct sock *sk, int size)
{
struct inet_sock *inet = inet_sk(sk);
/* we do not want to clear pinet6 field, because of RCU lookups */
sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
memset(&inet->pinet6 + 1, 0, size);
}
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
.backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
.clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udpv6_prot,
.ops = &inet6_dgram_ops,
.flags = INET_PROTOSW_PERMANENT,
};
int __init udpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
ret = inet6_register_protosw(&udpv6_protosw);
if (ret)
goto out_udpv6_protocol;
out:
return ret;
out_udpv6_protocol:
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
| ./CrossVul/dataset_final_sorted/CWE-358/c/good_4847_1 |
crossvul-cpp_data_good_4847_0 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The User Datagram Protocol (UDP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Hirokazu Takahashi, <taka@valinux.co.jp>
*
* Fixes:
* Alan Cox : verify_area() calls
* Alan Cox : stopped close while in use off icmp
* messages. Not a fix but a botch that
* for udp at least is 'valid'.
* Alan Cox : Fixed icmp handling properly
* Alan Cox : Correct error for oversized datagrams
* Alan Cox : Tidied select() semantics.
* Alan Cox : udp_err() fixed properly, also now
* select and read wake correctly on errors
* Alan Cox : udp_send verify_area moved to avoid mem leak
* Alan Cox : UDP can count its memory
* Alan Cox : send to an unknown connection causes
* an ECONNREFUSED off the icmp, but
* does NOT close.
* Alan Cox : Switched to new sk_buff handlers. No more backlog!
* Alan Cox : Using generic datagram code. Even smaller and the PEEK
* bug no longer crashes it.
* Fred Van Kempen : Net2e support for sk->broadcast.
* Alan Cox : Uses skb_free_datagram
* Alan Cox : Added get/set sockopt support.
* Alan Cox : Broadcasting without option set returns EACCES.
* Alan Cox : No wakeup calls. Instead we now use the callbacks.
* Alan Cox : Use ip_tos and ip_ttl
* Alan Cox : SNMP Mibs
* Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
* Matt Dillon : UDP length checks.
* Alan Cox : Smarter af_inet used properly.
* Alan Cox : Use new kernel side addressing.
* Alan Cox : Incorrect return on truncated datagram receive.
* Arnt Gulbrandsen : New udp_send and stuff
* Alan Cox : Cache last socket
* Alan Cox : Route cache
* Jon Peatfield : Minor efficiency fix to sendto().
* Mike Shaver : RFC1122 checks.
* Alan Cox : Nonblocking error fix.
* Willy Konynenberg : Transparent proxying support.
* Mike McLagan : Routing by source
* David S. Miller : New socket lookup architecture.
* Last socket cache retained as it
* does have a high hit rate.
* Olaf Kirch : Don't linearise iovec on sendmsg.
* Andi Kleen : Some cleanups, cache destination entry
* for connect.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Melvin Smith : Check msg_name not msg_namelen in sendto(),
* return ENOTCONN for unconnected sockets (POSIX)
* Janos Farkas : don't deliver multi/broadcasts to a different
* bound-to-device socket
* Hirokazu Takahashi : HW checksumming for outgoing UDP
* datagrams.
* Hirokazu Takahashi : sendfile() on UDP works now.
* Arnaldo C. Melo : convert /proc/net/udp to seq_file
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
* James Chapman : Add L2TP encapsulation type.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "UDP: " fmt
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);
int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);
int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2),
unsigned int log)
{
struct sock *sk2;
struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each(sk2, node, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(bitmap || udp_sk(sk2)->udp_port_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(!sk2->sk_reuseport || !sk->sk_reuseport ||
!uid_eq(uid, sock_i_uid(sk2))) &&
saddr_comp(sk, sk2)) {
if (!bitmap)
return 1;
__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
}
}
return 0;
}
/*
* Note: we still hold spinlock of primary hash chain, so no other writer
* can insert/delete a socket with local_port == num
*/
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
{
struct sock *sk2;
struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(udp_sk(sk2)->udp_port_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(!sk2->sk_reuseport || !sk->sk_reuseport ||
!uid_eq(uid, sock_i_uid(sk2))) &&
saddr_comp(sk, sk2)) {
res = 1;
break;
}
}
spin_unlock(&hslot2->lock);
return res;
}
/**
* udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
*
* @sk: socket struct in question
* @snum: port number to look up
* @saddr_comp: AF-dependent comparison of bound local IP addresses
* @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
* with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2),
unsigned int hash2_nulladdr)
{
struct udp_hslot *hslot, *hslot2;
struct udp_table *udptable = sk->sk_prot->h.udp_table;
int error = 1;
struct net *net = sock_net(sk);
if (!snum) {
int low, high, remaining;
unsigned int rand;
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rand = prandom_u32();
first = reciprocal_scale(rand, remaining) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * (udptable->mask + 1);
last = first + udptable->mask + 1;
do {
hslot = udp_hashslot(udptable, net, first);
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
saddr_comp, udptable->log);
snum = first;
/*
* Iterate on all possible values of snum for this hash.
* Using steps of an odd multiple of UDP_HTABLE_SIZE
* give us randomization and full range coverage.
*/
do {
if (low <= snum && snum <= high &&
!test_bit(snum >> udptable->log, bitmap) &&
!inet_is_local_reserved_port(net, snum))
goto found;
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
} while (++first != last);
goto fail;
} else {
hslot = udp_hashslot(udptable, net, snum);
spin_lock_bh(&hslot->lock);
if (hslot->count > 10) {
int exist;
unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
slot2 &= udptable->mask;
hash2_nulladdr &= udptable->mask;
hslot2 = udp_hashslot2(udptable, slot2);
if (hslot->count < hslot2->count)
goto scan_primary_hash;
exist = udp_lib_lport_inuse2(net, snum, hslot2,
sk, saddr_comp);
if (!exist && (hash2_nulladdr != slot2)) {
hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
exist = udp_lib_lport_inuse2(net, snum, hslot2,
sk, saddr_comp);
}
if (exist)
goto fail_unlock;
else
goto found;
}
scan_primary_hash:
if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
saddr_comp, 0))
goto fail_unlock;
}
found:
inet_sk(sk)->inet_num = snum;
udp_sk(sk)->udp_port_hash = snum;
udp_sk(sk)->udp_portaddr_hash ^= snum;
if (sk_unhashed(sk)) {
sk_nulls_add_node_rcu(sk, &hslot->head);
hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock(&hslot2->lock);
hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&hslot2->head);
hslot2->count++;
spin_unlock(&hslot2->lock);
}
error = 0;
fail_unlock:
spin_unlock_bh(&hslot->lock);
fail:
return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
return (!ipv6_only_sock(sk2) &&
(!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
unsigned int port)
{
return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
unsigned int hash2_partial =
udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
static inline int compute_score(struct sock *sk, struct net *net,
__be32 saddr, unsigned short hnum, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
ipv6_only_sock(sk))
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr) {
if (inet->inet_rcv_saddr != daddr)
return -1;
score += 4;
}
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
/*
* In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
*/
static inline int compute_score2(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
ipv6_only_sock(sk))
return -1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr != daddr ||
inet->inet_num != hnum)
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
static u32 udp_ehash_secret __read_mostly;
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
udp_ehash_secret + net_hash_mix(net));
}
/* called with read_rcu_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp4_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
htonl(INADDR_ANY), hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = 0;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, saddr, hnum, sport,
daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, saddr, hnum, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
const struct iphdr *iph = ip_hdr(skb);
return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
iph->daddr, dport, inet_iif(skb),
udptable);
}
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
(inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
(inet->inet_dport != rmt_port && inet->inet_dport) ||
(inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
ipv6_only_sock(sk) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
return false;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
return false;
return true;
}
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
* be closed and the error returned to the user. If err > 0
* it's just the icmp type << 8 | icmp code.
* Header points to the ip header of the error packet. We move
* on past this. Then (as it used to claim before adjustment)
* header points to the first 8 bytes of the udp header. We need
* to find the appropriate port.
*/
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
struct inet_sock *inet;
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
int harderr;
int err;
struct net *net = dev_net(skb->dev);
sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
iph->saddr, uh->source, skb->dev->ifindex, udptable);
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return; /* No socket for error */
}
err = 0;
harderr = 0;
inet = inet_sk(sk);
switch (type) {
default:
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
case ICMP_SOURCE_QUENCH:
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
harderr = 1;
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
ipv4_sk_update_pmtu(skb, sk, info);
if (inet->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
break;
}
goto out;
}
err = EHOSTUNREACH;
if (code <= NR_ICMP_UNREACH) {
harderr = icmp_err_convert[code].fatal;
err = icmp_err_convert[code].errno;
}
break;
case ICMP_REDIRECT:
ipv4_sk_redirect(skb, sk);
goto out;
}
/*
* RFC1122: OK. Passes ICMP errors back to application, as per
* 4.1.3.3.
*/
if (!inet->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else
ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
void udp_err(struct sk_buff *skb, u32 info)
{
__udp4_lib_err(skb, info, &udp_table);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
void udp_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending) {
up->len = 0;
up->pending = 0;
ip_flush_pending_frames(sk);
}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
* udp4_hwcsum - handle outgoing HW checksumming
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
* @src: source IP address
* @dst: destination IP address
*/
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
struct udphdr *uh = udp_hdr(skb);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
int hlen = len;
__wsum csum = 0;
if (!skb_has_frag_list(skb)) {
/*
* Only one fragment on the socket.
*/
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_tcpudp_magic(src, dst, len,
IPPROTO_UDP, 0);
} else {
struct sk_buff *frags;
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
skb_walk_frags(skb, frags) {
csum = csum_add(csum, frags->csum);
hlen -= frags->len;
}
csum = skb_checksum(skb, offset, hlen, csum);
skb->ip_summed = CHECKSUM_NONE;
uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
* for the simple case like when setting the checksum for a UDP tunnel.
*/
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len)
{
struct udphdr *uh = udp_hdr(skb);
if (nocheck)
uh->check = 0;
else if (skb_is_gso(skb))
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
else if (skb_dst(skb) && skb_dst(skb)->dev &&
(skb_dst(skb)->dev->features &
(NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) {
BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
} else {
__wsum csum;
BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
uh->check = 0;
csum = skb_checksum(skb, 0, len, 0);
uh->check = udp_v4_check(len, saddr, daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct udphdr *uh;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
__wsum csum = 0;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = inet->inet_sport;
uh->dest = fl4->fl4_dport;
uh->len = htons(len);
uh->check = 0;
if (is_udplite) /* UDP-Lite */
csum = udplite_csum(skb);
else if (sk->sk_no_check_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
goto send;
} else
csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
sk->sk_protocol, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
/*
* Push out all pending data as one UDP datagram. Socket is locked.
*/
int udp_push_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
struct sk_buff *skb;
int err = 0;
skb = ip_finish_skb(sk, fl4);
if (!skb)
goto out;
err = udp_send_skb(skb, fl4);
out:
up->len = 0;
up->pending = 0;
return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 fl4_stack;
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
fl4 = &inet->cork.fl.u.ip4;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc,
sk->sk_family == AF_INET6);
if (err)
return err;
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
saddr = ipc.addr;
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->opt.faddr;
connected = 0;
}
tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (!rt) {
struct net *net = sock_net(sk);
__u8 flow_flags = inet_sk_flowi_flags(sk);
fl4 = &fl4_stack;
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
flow_flags,
faddr, saddr, dport, inet->inet_sport);
if (!saddr && ipc.oif)
l3mdev_get_saddr(net, ipc.oif, fl4);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = fl4->saddr;
if (!ipc.addr)
daddr = ipc.addr = fl4->daddr;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_send_skb(skb, fl4);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
int udp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
int ret;
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
if (!up->pending) {
struct msghdr msg = { .msg_flags = flags|MSG_MORE };
/* Call udp_sendmsg to specify destination address which
* sendpage interface can't pass.
* This will succeed only when the socket is connected.
*/
ret = udp_sendmsg(sk, &msg, 0);
if (ret < 0)
return ret;
}
lock_sock(sk);
if (unlikely(!up->pending)) {
release_sock(sk);
net_dbg_ratelimited("udp cork app bug 3\n");
return -EINVAL;
}
ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
page, offset, size, flags);
if (ret == -EOPNOTSUPP) {
release_sock(sk);
return sock_no_sendpage(sk->sk_socket, page, offset,
size, flags);
}
if (ret < 0) {
udp_flush_pending_frames(sk);
goto out;
}
up->len += size;
if (!(up->corkflag || (flags&MSG_MORE)))
ret = udp_push_pending_frames(sk);
if (!ret)
ret = size;
out:
release_sock(sk);
return ret;
}
/**
* first_packet_length - return length of first packet in receive queue
* @sk: socket
*
* Drops all bad checksum frames, until a valid one is found.
* Returns the length of found skb, or 0 if none is found.
*/
static unsigned int first_packet_length(struct sock *sk)
{
struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
struct sk_buff *skb;
unsigned int res;
__skb_queue_head_init(&list_kill);
spin_lock_bh(&rcvq->lock);
while ((skb = skb_peek(rcvq)) != NULL &&
udp_lib_checksum_complete(skb)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
atomic_inc(&sk->sk_drops);
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
}
res = skb ? skb->len : 0;
spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) {
bool slow = lock_sock_fast(sk);
__skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk);
unlock_sock_fast(sk, slow);
}
return res;
}
/*
* IOCTL requests applicable to the UDP protocol
*/
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
{
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ:
{
unsigned int amount = first_packet_length(sk);
if (amount)
/*
* We will only return the amount
* of this packet since that is all
* that will be read.
*/
amount -= sizeof(struct udphdr);
return put_user(amount, (int __user *)arg);
}
default:
return -ENOIOCTLCMD;
}
return 0;
}
EXPORT_SYMBOL(udp_ioctl);
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
*/
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
bool slow;
if (flags & MSG_ERRQUEUE)
return ip_recv_error(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
checksum_valid = !udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
if (checksum_valid || skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
goto out_free;
}
if (!peeked)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = udp_hdr(skb)->source;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
unlock_sock_fast(sk, slow);
/* starting over for a new packet, but check if we need to yield */
cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
int udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
sock_rps_reset_rxhash(sk);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
sk->sk_prot->unhash(sk);
inet->inet_sport = 0;
}
sk_dst_reset(sk);
return 0;
}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
struct udp_hslot *hslot, *hslot2;
hslot = udp_hashslot(udptable, sock_net(sk),
udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock_bh(&hslot->lock);
if (sk_nulls_del_node_init_rcu(sk)) {
hslot->count--;
inet_sk(sk)->inet_num = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_lock(&hslot2->lock);
hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
}
spin_unlock_bh(&hslot->lock);
}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
* inet_rcv_saddr was changed, we must rehash secondary hash
*/
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
struct udp_hslot *hslot, *hslot2, *nhslot2;
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
nhslot2 = udp_hashslot2(udptable, newhash);
udp_sk(sk)->udp_portaddr_hash = newhash;
if (hslot2 != nhslot2) {
hslot = udp_hashslot(udptable, sock_net(sk),
udp_sk(sk)->udp_port_hash);
/* we must lock primary chain too */
spin_lock_bh(&hslot->lock);
spin_lock(&hslot2->lock);
hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
spin_lock(&nhslot2->lock);
hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&nhslot2->head);
nhslot2->count++;
spin_unlock(&nhslot2->lock);
spin_unlock_bh(&hslot->lock);
}
}
}
EXPORT_SYMBOL(udp_lib_rehash);
static void udp_v4_rehash(struct sock *sk)
{
u16 new_hash = udp4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (inet_sk(sk)->inet_daddr) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
trace_udp_fail_queue_rcv_skb(rc, sk);
return -1;
}
return 0;
}
static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
if (!static_key_enabled(&udp_encap_needed))
static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);
/* returns:
* -1: error
* 0: success
* >0: "udp encap" protocol resubmission
*
* Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed.
*/
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
/*
* Charge it to the socket, dropping if the queue is full.
*/
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
if (udp_lib_checksum_complete(skb))
goto csum_error;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
* disabled for the following two types of errors: these depend
* on the application settings, not on the functioning of the
* protocol stack as such.
*
* RFC 3828 here recommends (sec 3.3): "There should also be a
* way ... to ... at least let the receiving application block
* delivery of packets with coverage values less than a value
* provided by the application."
*/
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
/* The next case involves violating the min. coverage requested
* by the receiver. This is subtle: if receiver wants x and x is
* greater than the buffersize/MTU then receiver will complain
* that it wants x while sender emits packets of smaller size y.
* Therefore the above ...()->partial_cov statement is essential.
*/
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter) &&
udp_lib_checksum_complete(skb))
goto csum_error;
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
goto drop;
}
rc = 0;
ipv4_pktinfo_prepare(sk, skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
unsigned int i;
struct sk_buff *skb1 = NULL;
struct sock *sk;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
sock_put(sk);
}
if (unlikely(skb1))
kfree_skb(skb1);
}
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old;
dst_hold(dst);
old = xchg(&sk->sk_rx_dst, dst);
dst_release(old);
}
/*
* Multicasts and broadcasts go to each listener.
*
* Note: called only from the BH handler context.
*/
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
struct udp_table *udptable,
int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = skb->dev->ifindex;
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
udp_table.mask;
hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
hslot = &udp_table.hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
spin_lock(&hslot->lock);
sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
if (__udp_is_mcast_sock(net, sk,
uh->dest, daddr,
uh->source, saddr,
dif, hnum)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
inner_flushed = true;
count = 0;
}
stack[count++] = sk;
sock_hold(sk);
}
}
spin_unlock(&hslot->lock);
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
/*
* do the slow work with no lock held
*/
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
* CHECKSUM_UNNECESSARY means, that no more checks are required.
* Otherwise, csum completion requires chacksumming packet body,
* including udp header and folding it to skb->csum.
*/
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
int proto)
{
int err;
UDP_SKB_CB(skb)->partial_cov = 0;
UDP_SKB_CB(skb)->cscov = skb->len;
if (proto == IPPROTO_UDPLITE) {
err = udplite_checksum_init(skb, uh);
if (err)
return err;
}
return skb_checksum_init_zero_check(skb, proto, uh->check,
inet_compute_pseudo);
}
/*
* All we need to do is get the socket, and then do a checksum.
*/
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct sock *sk;
struct udphdr *uh;
unsigned short ulen;
struct rtable *rt = skb_rtable(skb);
__be32 saddr, daddr;
struct net *net = dev_net(skb->dev);
/*
* Validate the packet.
*/
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto drop; /* No space for header. */
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
saddr = ip_hdr(skb)->saddr;
daddr = ip_hdr(skb)->daddr;
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
goto short_packet;
uh = udp_hdr(skb);
}
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
sk = skb_steal_sock(skb);
if (sk) {
struct dst_entry *dst = skb_dst(skb);
int ret;
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
return __udp4_lib_mcast_deliver(net, skb, uh,
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
inet_compute_pseudo);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
/* No socket. Drop packet silently, if checksum is wrong */
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
* Hmm. We got an UDP packet to a port to which we
* don't wanna listen. Ignore it.
*/
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source),
ulen, skb->len,
&daddr, ntohs(uh->dest));
goto drop;
csum_error:
/*
* RFC1122: OK. Discards the bad packet silently (as far as
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
ulen);
UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
/* We can only early demux multicast if there is a single matching socket.
* If more than one socket found returns NULL
*/
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(loc_port);
unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
struct udp_hslot *hslot = &udp_table.hash[slot];
/* Do not bother scanning a too big list */
if (hslot->count > 10)
return NULL;
rcu_read_lock();
begin:
count = 0;
result = NULL;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
if (__udp_is_mcast_sock(net, sk,
loc_port, loc_addr,
rmt_port, rmt_addr,
dif, hnum)) {
result = sk;
++count;
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (count != 1 ||
unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(!__udp_is_mcast_sock(net, result,
loc_port, loc_addr,
rmt_port, rmt_addr,
dif, hnum))) {
sock_put(result);
result = NULL;
}
}
rcu_read_unlock();
return result;
}
/* For unicast we should only early demux connected sockets or we can
* break forwarding setups. The chains here can be long so only check
* if the first socket is an exact match and if not move on.
*/
static struct sock *__udp4_lib_demux_lookup(struct net *net,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(loc_port);
unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
unsigned int slot2 = hash2 & udp_table.mask;
struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
rcu_read_lock();
result = NULL;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
if (INET_MATCH(sk, net, acookie,
rmt_addr, loc_addr, ports, dif))
result = sk;
/* Only check first socket in chain */
break;
}
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(!INET_MATCH(sk, net, acookie,
rmt_addr, loc_addr,
ports, dif))) {
sock_put(result);
result = NULL;
}
}
rcu_read_unlock();
return result;
}
void udp_v4_early_demux(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct udphdr *uh;
struct sock *sk;
struct dst_entry *dst;
int dif = skb->dev->ifindex;
int ours;
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
return;
iph = ip_hdr(skb);
uh = udp_hdr(skb);
if (skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) {
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
if (!in_dev)
return;
ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
iph->protocol);
if (!ours)
return;
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
} else if (skb->pkt_type == PACKET_HOST) {
sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
} else {
return;
}
if (!sk)
return;
skb->sk = sk;
skb->destructor = sock_efree;
dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
if (dst) {
/* DST_NOCACHE can not be used without taking a reference */
if (dst->flags & DST_NOCACHE) {
if (likely(atomic_inc_not_zero(&dst->__refcnt)))
skb_dst_set(skb, dst);
} else {
skb_dst_set_noref(skb, dst);
}
}
}
int udp_rcv(struct sk_buff *skb)
{
return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
bool slow = lock_sock_fast(sk);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
}
/*
* Socket option code for UDP
*/
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *))
{
struct udp_sock *up = udp_sk(sk);
int val, valbool;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
switch (optname) {
case UDP_CORK:
if (val != 0) {
up->corkflag = 1;
} else {
up->corkflag = 0;
lock_sock(sk);
push_pending_frames(sk);
release_sock(sk);
}
break;
case UDP_ENCAP:
switch (val) {
case 0:
case UDP_ENCAP_ESPINUDP:
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
udp_encap_enable();
break;
default:
err = -ENOPROTOOPT;
break;
}
break;
case UDP_NO_CHECK6_TX:
up->no_check6_tx = valbool;
break;
case UDP_NO_CHECK6_RX:
up->no_check6_rx = valbool;
break;
/*
* UDP-Lite's partial checksum coverage (RFC 3828).
*/
/* The sender sets actual checksum coverage length via this option.
* The case coverage > packet length is handled by send module. */
case UDPLITE_SEND_CSCOV:
if (!is_udplite) /* Disable the option on UDP sockets */
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
up->pcslen = val;
up->pcflag |= UDPLITE_SEND_CC;
break;
/* The receiver specifies a minimum checksum coverage value. To make
* sense, this should be set to at least 8 (as done below). If zero is
* used, this again means full checksum coverage. */
case UDPLITE_RECV_CSCOV:
if (!is_udplite) /* Disable the option on UDP sockets */
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Avoid silly minimal values. */
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
up->pcrlen = val;
up->pcflag |= UDPLITE_RECV_CC;
break;
default:
err = -ENOPROTOOPT;
break;
}
return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_push_pending_frames);
return ip_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_push_pending_frames);
return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct udp_sock *up = udp_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
switch (optname) {
case UDP_CORK:
val = up->corkflag;
break;
case UDP_ENCAP:
val = up->encap_type;
break;
case UDP_NO_CHECK6_TX:
val = up->no_check6_tx;
break;
case UDP_NO_CHECK6_RX:
val = up->no_check6_rx;
break;
/* The following two cannot be changed on UDP sockets, the return is
* always 0 (which corresponds to the full checksum coverage of UDP). */
case UDPLITE_SEND_CSCOV:
val = up->pcslen;
break;
case UDPLITE_RECV_CSCOV:
val = up->pcrlen;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ip_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
* udp_poll - wait for a UDP event.
* @file - file struct
* @sock - socket
* @wait - poll table
*
* This is same as datagram poll, except for the special case of
* blocking sockets. If application is using a blocking fd
* and a packet with checksum error is in the queue;
* then it could get return from select indicating data available
* but then block when reading it. Add special case code
* to work around these arguably broken applications.
*/
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* Check for false positives due to checksum errors */
if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
mask &= ~(POLLIN | POLLRDNORM);
return mask;
}
EXPORT_SYMBOL(udp_poll);
struct proto udp_prot = {
.name = "UDP",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udp_destroy_sock,
.setsockopt = udp_setsockopt,
.getsockopt = udp_getsockopt,
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
.backlog_rcv = __udp_queue_rcv_skb,
.release_cb = ip4_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
.clear_sk = sk_prot_clear_portaddr_nulls,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static struct sock *udp_get_first(struct seq_file *seq, int start)
{
struct sock *sk;
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
for (state->bucket = start; state->bucket <= state->udp_table->mask;
++state->bucket) {
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
if (hlist_nulls_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
sk_nulls_for_each(sk, node, &hslot->head) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == state->family)
goto found;
}
spin_unlock_bh(&hslot->lock);
}
sk = NULL;
found:
return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
do {
sk = sk_nulls_next(sk);
} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
if (!sk) {
if (state->bucket <= state->udp_table->mask)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
return udp_get_first(seq, state->bucket + 1);
}
return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
struct sock *sk = udp_get_first(seq, 0);
if (sk)
while (pos && (sk = udp_get_next(seq, sk)) != NULL)
--pos;
return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct udp_iter_state *state = seq->private;
state->bucket = MAX_UDP_PORTS;
return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = udp_get_idx(seq, 0);
else
sk = udp_get_next(seq, v);
++*pos;
return sk;
}
static void udp_seq_stop(struct seq_file *seq, void *v)
{
struct udp_iter_state *state = seq->private;
if (state->bucket <= state->udp_table->mask)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
int udp_seq_open(struct inode *inode, struct file *file)
{
struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
struct udp_iter_state *s;
int err;
err = seq_open_net(inode, file, &afinfo->seq_ops,
sizeof(struct udp_iter_state));
if (err < 0)
return err;
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
s->udp_table = afinfo->udp_table;
return err;
}
EXPORT_SYMBOL(udp_seq_open);
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
struct proc_dir_entry *p;
int rc = 0;
afinfo->seq_ops.start = udp_seq_start;
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
return rc;
}
EXPORT_SYMBOL(udp_proc_register);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
int bucket)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
seq_setwidth(seq, 127);
if (v == SEQ_START_TOKEN)
seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops");
else {
struct udp_iter_state *state = seq->private;
udp4_format_sock(v, seq, state->bucket);
}
seq_pad(seq, '\n');
return 0;
}
static const struct file_operations udp_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
.name = "udp",
.family = AF_INET,
.udp_table = &udp_table,
.seq_fops = &udp_afinfo_seq_fops,
.seq_ops = {
.show = udp4_seq_show,
},
};
static int __net_init udp4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udp4_seq_afinfo);
}
static void __net_exit udp4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udp4_seq_afinfo);
}
static struct pernet_operations udp4_net_ops = {
.init = udp4_proc_init_net,
.exit = udp4_proc_exit_net,
};
int __init udp4_proc_init(void)
{
return register_pernet_subsys(&udp4_net_ops);
}
void udp4_proc_exit(void)
{
unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
ssize_t ret;
if (!str)
return 0;
ret = kstrtoul(str, 0, &uhash_entries);
if (ret)
return 0;
if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
uhash_entries = UDP_HTABLE_SIZE_MIN;
return 1;
}
__setup("uhash_entries=", set_uhash_entries);
void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i;
table->hash = alloc_large_system_hash(name,
2 * sizeof(struct udp_hslot),
uhash_entries,
21, /* one slot per 2 MB */
0,
&table->log,
&table->mask,
UDP_HTABLE_SIZE_MIN,
64 * 1024);
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
table->hash[i].count = 0;
spin_lock_init(&table->hash[i].lock);
}
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
table->hash2[i].count = 0;
spin_lock_init(&table->hash2[i].lock);
}
}
u32 udp_flow_hashrnd(void)
{
static u32 hashrnd __read_mostly;
net_get_random_once(&hashrnd, sizeof(hashrnd));
return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
void __init udp_init(void)
{
unsigned long limit;
udp_table_init(&udp_table, "UDP");
limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
sysctl_udp_rmem_min = SK_MEM_QUANTUM;
sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
| ./CrossVul/dataset_final_sorted/CWE-358/c/good_4847_0 |
crossvul-cpp_data_bad_3239_0 | /* Copyright (C) 2007-2012 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include "suricata-common.h"
#include "conf.h"
#include "defrag-hash.h"
#include "defrag-queue.h"
#include "defrag-config.h"
#include "util-random.h"
#include "util-byte.h"
#include "util-misc.h"
#include "util-hash-lookup3.h"
static DefragTracker *DefragTrackerGetUsedDefragTracker(void);
/** queue with spare tracker */
static DefragTrackerQueue defragtracker_spare_q;
uint32_t DefragTrackerSpareQueueGetSize(void)
{
return DefragTrackerQueueLen(&defragtracker_spare_q);
}
void DefragTrackerMoveToSpare(DefragTracker *h)
{
DefragTrackerEnqueue(&defragtracker_spare_q, h);
(void) SC_ATOMIC_SUB(defragtracker_counter, 1);
}
DefragTracker *DefragTrackerAlloc(void)
{
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
return NULL;
}
(void) SC_ATOMIC_ADD(defrag_memuse, sizeof(DefragTracker));
DefragTracker *dt = SCMalloc(sizeof(DefragTracker));
if (unlikely(dt == NULL))
goto error;
memset(dt, 0x00, sizeof(DefragTracker));
SCMutexInit(&dt->lock, NULL);
SC_ATOMIC_INIT(dt->use_cnt);
return dt;
error:
return NULL;
}
void DefragTrackerFree(DefragTracker *dt)
{
if (dt != NULL) {
DefragTrackerClearMemory(dt);
SCMutexDestroy(&dt->lock);
SCFree(dt);
(void) SC_ATOMIC_SUB(defrag_memuse, sizeof(DefragTracker));
}
}
#define DefragTrackerIncrUsecnt(dt) \
SC_ATOMIC_ADD((dt)->use_cnt, 1)
#define DefragTrackerDecrUsecnt(dt) \
SC_ATOMIC_SUB((dt)->use_cnt, 1)
static void DefragTrackerInit(DefragTracker *dt, Packet *p)
{
/* copy address */
COPY_ADDRESS(&p->src, &dt->src_addr);
COPY_ADDRESS(&p->dst, &dt->dst_addr);
if (PKT_IS_IPV4(p)) {
dt->id = (int32_t)IPV4_GET_IPID(p);
dt->af = AF_INET;
} else {
dt->id = (int32_t)IPV6_EXTHDR_GET_FH_ID(p);
dt->af = AF_INET6;
}
dt->vlan_id[0] = p->vlan_id[0];
dt->vlan_id[1] = p->vlan_id[1];
dt->policy = DefragGetOsPolicy(p);
dt->host_timeout = DefragPolicyGetHostTimeout(p);
dt->remove = 0;
dt->seen_last = 0;
TAILQ_INIT(&dt->frags);
(void) DefragTrackerIncrUsecnt(dt);
}
void DefragTrackerRelease(DefragTracker *t)
{
(void) DefragTrackerDecrUsecnt(t);
SCMutexUnlock(&t->lock);
}
void DefragTrackerClearMemory(DefragTracker *dt)
{
DefragTrackerFreeFrags(dt);
SC_ATOMIC_DESTROY(dt->use_cnt);
}
#define DEFRAG_DEFAULT_HASHSIZE 4096
#define DEFRAG_DEFAULT_MEMCAP 16777216
#define DEFRAG_DEFAULT_PREALLOC 1000
/** \brief initialize the configuration
* \warning Not thread safe */
void DefragInitConfig(char quiet)
{
SCLogDebug("initializing defrag engine...");
memset(&defrag_config, 0, sizeof(defrag_config));
//SC_ATOMIC_INIT(flow_flags);
SC_ATOMIC_INIT(defragtracker_counter);
SC_ATOMIC_INIT(defrag_memuse);
SC_ATOMIC_INIT(defragtracker_prune_idx);
DefragTrackerQueueInit(&defragtracker_spare_q);
#ifndef AFLFUZZ_NO_RANDOM
unsigned int seed = RandomTimePreseed();
/* set defaults */
defrag_config.hash_rand = (int)(DEFRAG_DEFAULT_HASHSIZE * (rand_r(&seed) / RAND_MAX + 1.0));
#endif
defrag_config.hash_size = DEFRAG_DEFAULT_HASHSIZE;
defrag_config.memcap = DEFRAG_DEFAULT_MEMCAP;
defrag_config.prealloc = DEFRAG_DEFAULT_PREALLOC;
/* Check if we have memcap and hash_size defined at config */
char *conf_val;
uint32_t configval = 0;
/** set config values for memcap, prealloc and hash_size */
if ((ConfGet("defrag.memcap", &conf_val)) == 1)
{
if (ParseSizeStringU64(conf_val, &defrag_config.memcap) < 0) {
SCLogError(SC_ERR_SIZE_PARSE, "Error parsing defrag.memcap "
"from conf file - %s. Killing engine",
conf_val);
exit(EXIT_FAILURE);
}
}
if ((ConfGet("defrag.hash-size", &conf_val)) == 1)
{
if (ByteExtractStringUint32(&configval, 10, strlen(conf_val),
conf_val) > 0) {
defrag_config.hash_size = configval;
} else {
WarnInvalidConfEntry("defrag.hash-size", "%"PRIu32, defrag_config.hash_size);
}
}
if ((ConfGet("defrag.trackers", &conf_val)) == 1)
{
if (ByteExtractStringUint32(&configval, 10, strlen(conf_val),
conf_val) > 0) {
defrag_config.prealloc = configval;
} else {
WarnInvalidConfEntry("defrag.trackers", "%"PRIu32, defrag_config.prealloc);
}
}
SCLogDebug("DefragTracker config from suricata.yaml: memcap: %"PRIu64", hash-size: "
"%"PRIu32", prealloc: %"PRIu32, defrag_config.memcap,
defrag_config.hash_size, defrag_config.prealloc);
/* alloc hash memory */
uint64_t hash_size = defrag_config.hash_size * sizeof(DefragTrackerHashRow);
if (!(DEFRAG_CHECK_MEMCAP(hash_size))) {
SCLogError(SC_ERR_DEFRAG_INIT, "allocating defrag hash failed: "
"max defrag memcap is smaller than projected hash size. "
"Memcap: %"PRIu64", Hash table size %"PRIu64". Calculate "
"total hash size by multiplying \"defrag.hash-size\" with %"PRIuMAX", "
"which is the hash bucket size.", defrag_config.memcap, hash_size,
(uintmax_t)sizeof(DefragTrackerHashRow));
exit(EXIT_FAILURE);
}
defragtracker_hash = SCCalloc(defrag_config.hash_size, sizeof(DefragTrackerHashRow));
if (unlikely(defragtracker_hash == NULL)) {
SCLogError(SC_ERR_FATAL, "Fatal error encountered in DefragTrackerInitConfig. Exiting...");
exit(EXIT_FAILURE);
}
memset(defragtracker_hash, 0, defrag_config.hash_size * sizeof(DefragTrackerHashRow));
uint32_t i = 0;
for (i = 0; i < defrag_config.hash_size; i++) {
DRLOCK_INIT(&defragtracker_hash[i]);
}
(void) SC_ATOMIC_ADD(defrag_memuse, (defrag_config.hash_size * sizeof(DefragTrackerHashRow)));
if (quiet == FALSE) {
SCLogConfig("allocated %llu bytes of memory for the defrag hash... "
"%" PRIu32 " buckets of size %" PRIuMAX "",
SC_ATOMIC_GET(defrag_memuse), defrag_config.hash_size,
(uintmax_t)sizeof(DefragTrackerHashRow));
}
if ((ConfGet("defrag.prealloc", &conf_val)) == 1)
{
if (ConfValIsTrue(conf_val)) {
/* pre allocate defrag trackers */
for (i = 0; i < defrag_config.prealloc; i++) {
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
SCLogError(SC_ERR_DEFRAG_INIT, "preallocating defrag trackers failed: "
"max defrag memcap reached. Memcap %"PRIu64", "
"Memuse %"PRIu64".", defrag_config.memcap,
((uint64_t)SC_ATOMIC_GET(defrag_memuse) + (uint64_t)sizeof(DefragTracker)));
exit(EXIT_FAILURE);
}
DefragTracker *h = DefragTrackerAlloc();
if (h == NULL) {
SCLogError(SC_ERR_DEFRAG_INIT, "preallocating defrag failed: %s", strerror(errno));
exit(EXIT_FAILURE);
}
DefragTrackerEnqueue(&defragtracker_spare_q,h);
}
if (quiet == FALSE) {
SCLogConfig("preallocated %" PRIu32 " defrag trackers of size %" PRIuMAX "",
defragtracker_spare_q.len, (uintmax_t)sizeof(DefragTracker));
}
}
}
if (quiet == FALSE) {
SCLogConfig("defrag memory usage: %llu bytes, maximum: %"PRIu64,
SC_ATOMIC_GET(defrag_memuse), defrag_config.memcap);
}
return;
}
/** \brief print some defrag stats
* \warning Not thread safe */
static void DefragTrackerPrintStats (void)
{
}
/** \brief shutdown the flow engine
* \warning Not thread safe */
void DefragHashShutdown(void)
{
DefragTracker *dt;
uint32_t u;
DefragTrackerPrintStats();
/* free spare queue */
while((dt = DefragTrackerDequeue(&defragtracker_spare_q))) {
BUG_ON(SC_ATOMIC_GET(dt->use_cnt) > 0);
DefragTrackerFree(dt);
}
/* clear and free the hash */
if (defragtracker_hash != NULL) {
for (u = 0; u < defrag_config.hash_size; u++) {
dt = defragtracker_hash[u].head;
while (dt) {
DefragTracker *n = dt->hnext;
DefragTrackerClearMemory(dt);
DefragTrackerFree(dt);
dt = n;
}
DRLOCK_DESTROY(&defragtracker_hash[u]);
}
SCFree(defragtracker_hash);
defragtracker_hash = NULL;
}
(void) SC_ATOMIC_SUB(defrag_memuse, defrag_config.hash_size * sizeof(DefragTrackerHashRow));
DefragTrackerQueueDestroy(&defragtracker_spare_q);
SC_ATOMIC_DESTROY(defragtracker_prune_idx);
SC_ATOMIC_DESTROY(defrag_memuse);
SC_ATOMIC_DESTROY(defragtracker_counter);
//SC_ATOMIC_DESTROY(flow_flags);
return;
}
/** \brief compare two raw ipv6 addrs
*
* \note we don't care about the real ipv6 ip's, this is just
* to consistently fill the DefragHashKey6 struct, without all
* the ntohl calls.
*
* \warning do not use elsewhere unless you know what you're doing.
* detect-engine-address-ipv6.c's AddressIPv6GtU32 is likely
* what you are looking for.
*/
static inline int DefragHashRawAddressIPv6GtU32(uint32_t *a, uint32_t *b)
{
int i;
for (i = 0; i < 4; i++) {
if (a[i] > b[i])
return 1;
if (a[i] < b[i])
break;
}
return 0;
}
typedef struct DefragHashKey4_ {
union {
struct {
uint32_t src, dst;
uint32_t id;
uint16_t vlan_id[2];
};
uint32_t u32[4];
};
} DefragHashKey4;
typedef struct DefragHashKey6_ {
union {
struct {
uint32_t src[4], dst[4];
uint32_t id;
uint16_t vlan_id[2];
};
uint32_t u32[10];
};
} DefragHashKey6;
/* calculate the hash key for this packet
*
* we're using:
* hash_rand -- set at init time
* source address
* destination address
* id
* vlan_id
*/
static inline uint32_t DefragHashGetKey(Packet *p)
{
uint32_t key;
if (p->ip4h != NULL) {
DefragHashKey4 dhk;
if (p->src.addr_data32[0] > p->dst.addr_data32[0]) {
dhk.src = p->src.addr_data32[0];
dhk.dst = p->dst.addr_data32[0];
} else {
dhk.src = p->dst.addr_data32[0];
dhk.dst = p->src.addr_data32[0];
}
dhk.id = (uint32_t)IPV4_GET_IPID(p);
dhk.vlan_id[0] = p->vlan_id[0];
dhk.vlan_id[1] = p->vlan_id[1];
uint32_t hash = hashword(dhk.u32, 4, defrag_config.hash_rand);
key = hash % defrag_config.hash_size;
} else if (p->ip6h != NULL) {
DefragHashKey6 dhk;
if (DefragHashRawAddressIPv6GtU32(p->src.addr_data32, p->dst.addr_data32)) {
dhk.src[0] = p->src.addr_data32[0];
dhk.src[1] = p->src.addr_data32[1];
dhk.src[2] = p->src.addr_data32[2];
dhk.src[3] = p->src.addr_data32[3];
dhk.dst[0] = p->dst.addr_data32[0];
dhk.dst[1] = p->dst.addr_data32[1];
dhk.dst[2] = p->dst.addr_data32[2];
dhk.dst[3] = p->dst.addr_data32[3];
} else {
dhk.src[0] = p->dst.addr_data32[0];
dhk.src[1] = p->dst.addr_data32[1];
dhk.src[2] = p->dst.addr_data32[2];
dhk.src[3] = p->dst.addr_data32[3];
dhk.dst[0] = p->src.addr_data32[0];
dhk.dst[1] = p->src.addr_data32[1];
dhk.dst[2] = p->src.addr_data32[2];
dhk.dst[3] = p->src.addr_data32[3];
}
dhk.id = IPV6_EXTHDR_GET_FH_ID(p);
dhk.vlan_id[0] = p->vlan_id[0];
dhk.vlan_id[1] = p->vlan_id[1];
uint32_t hash = hashword(dhk.u32, 10, defrag_config.hash_rand);
key = hash % defrag_config.hash_size;
} else
key = 0;
return key;
}
/* Since two or more trackers can have the same hash key, we need to compare
* the tracker with the current tracker key. */
#define CMP_DEFRAGTRACKER(d1,d2,id) \
(((CMP_ADDR(&(d1)->src_addr, &(d2)->src) && \
CMP_ADDR(&(d1)->dst_addr, &(d2)->dst)) || \
(CMP_ADDR(&(d1)->src_addr, &(d2)->dst) && \
CMP_ADDR(&(d1)->dst_addr, &(d2)->src))) && \
(d1)->id == (id) && \
(d1)->vlan_id[0] == (d2)->vlan_id[0] && \
(d1)->vlan_id[1] == (d2)->vlan_id[1])
static inline int DefragTrackerCompare(DefragTracker *t, Packet *p)
{
uint32_t id;
if (PKT_IS_IPV4(p)) {
id = (uint32_t)IPV4_GET_IPID(p);
} else {
id = IPV6_EXTHDR_GET_FH_ID(p);
}
return CMP_DEFRAGTRACKER(t, p, id);
}
/**
* \brief Get a new defrag tracker
*
* Get a new defrag tracker. We're checking memcap first and will try to make room
* if the memcap is reached.
*
* \retval dt *LOCKED* tracker on succes, NULL on error.
*/
static DefragTracker *DefragTrackerGetNew(Packet *p)
{
DefragTracker *dt = NULL;
/* get a tracker from the spare queue */
dt = DefragTrackerDequeue(&defragtracker_spare_q);
if (dt == NULL) {
/* If we reached the max memcap, we get a used tracker */
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
/* declare state of emergency */
//if (!(SC_ATOMIC_GET(defragtracker_flags) & DEFRAG_EMERGENCY)) {
// SC_ATOMIC_OR(defragtracker_flags, DEFRAG_EMERGENCY);
/* under high load, waking up the flow mgr each time leads
* to high cpu usage. Flows are not timed out much faster if
* we check a 1000 times a second. */
// FlowWakeupFlowManagerThread();
//}
dt = DefragTrackerGetUsedDefragTracker();
if (dt == NULL) {
return NULL;
}
/* freed a tracker, but it's unlocked */
} else {
/* now see if we can alloc a new tracker */
dt = DefragTrackerAlloc();
if (dt == NULL) {
return NULL;
}
/* tracker is initialized but *unlocked* */
}
} else {
/* tracker has been recycled before it went into the spare queue */
/* tracker is initialized (recylced) but *unlocked* */
}
(void) SC_ATOMIC_ADD(defragtracker_counter, 1);
SCMutexLock(&dt->lock);
return dt;
}
/* DefragGetTrackerFromHash
*
* Hash retrieval function for trackers. Looks up the hash bucket containing the
* tracker pointer. Then compares the packet with the found tracker to see if it is
* the tracker we need. If it isn't, walk the list until the right tracker is found.
*
* returns a *LOCKED* tracker or NULL
*/
DefragTracker *DefragGetTrackerFromHash (Packet *p)
{
DefragTracker *dt = NULL;
/* get the key to our bucket */
uint32_t key = DefragHashGetKey(p);
/* get our hash bucket and lock it */
DefragTrackerHashRow *hb = &defragtracker_hash[key];
DRLOCK_LOCK(hb);
/* see if the bucket already has a tracker */
if (hb->head == NULL) {
dt = DefragTrackerGetNew(p);
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
return NULL;
}
/* tracker is locked */
hb->head = dt;
hb->tail = dt;
/* got one, now lock, initialize and return */
DefragTrackerInit(dt,p);
DRLOCK_UNLOCK(hb);
return dt;
}
/* ok, we have a tracker in the bucket. Let's find out if it is our tracker */
dt = hb->head;
/* see if this is the tracker we are looking for */
if (dt->remove || DefragTrackerCompare(dt, p) == 0) {
DefragTracker *pdt = NULL; /* previous tracker */
while (dt) {
pdt = dt;
dt = dt->hnext;
if (dt == NULL) {
dt = pdt->hnext = DefragTrackerGetNew(p);
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
return NULL;
}
hb->tail = dt;
/* tracker is locked */
dt->hprev = pdt;
/* initialize and return */
DefragTrackerInit(dt,p);
DRLOCK_UNLOCK(hb);
return dt;
}
if (DefragTrackerCompare(dt, p) != 0) {
/* we found our tracker, lets put it on top of the
* hash list -- this rewards active trackers */
if (dt->hnext) {
dt->hnext->hprev = dt->hprev;
}
if (dt->hprev) {
dt->hprev->hnext = dt->hnext;
}
if (dt == hb->tail) {
hb->tail = dt->hprev;
}
dt->hnext = hb->head;
dt->hprev = NULL;
hb->head->hprev = dt;
hb->head = dt;
/* found our tracker, lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
}
}
/* lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
/** \brief look up a tracker in the hash
*
* \param a address to look up
*
* \retval h *LOCKED* tracker or NULL
*/
DefragTracker *DefragLookupTrackerFromHash (Packet *p)
{
DefragTracker *dt = NULL;
/* get the key to our bucket */
uint32_t key = DefragHashGetKey(p);
/* get our hash bucket and lock it */
DefragTrackerHashRow *hb = &defragtracker_hash[key];
DRLOCK_LOCK(hb);
/* see if the bucket already has a tracker */
if (hb->head == NULL) {
DRLOCK_UNLOCK(hb);
return dt;
}
/* ok, we have a tracker in the bucket. Let's find out if it is our tracker */
dt = hb->head;
/* see if this is the tracker we are looking for */
if (DefragTrackerCompare(dt, p) == 0) {
while (dt) {
dt = dt->hnext;
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
return dt;
}
if (DefragTrackerCompare(dt, p) != 0) {
/* we found our tracker, lets put it on top of the
* hash list -- this rewards active tracker */
if (dt->hnext) {
dt->hnext->hprev = dt->hprev;
}
if (dt->hprev) {
dt->hprev->hnext = dt->hnext;
}
if (dt == hb->tail) {
hb->tail = dt->hprev;
}
dt->hnext = hb->head;
dt->hprev = NULL;
hb->head->hprev = dt;
hb->head = dt;
/* found our tracker, lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
}
}
/* lock & return */
SCMutexLock(&dt->lock);
(void) DefragTrackerIncrUsecnt(dt);
DRLOCK_UNLOCK(hb);
return dt;
}
/** \internal
* \brief Get a tracker from the hash directly.
*
* Called in conditions where the spare queue is empty and memcap is reached.
*
* Walks the hash until a tracker can be freed. "defragtracker_prune_idx" atomic int makes
* sure we don't start at the top each time since that would clear the top of
* the hash leading to longer and longer search times under high pressure (observed).
*
* \retval dt tracker or NULL
*/
static DefragTracker *DefragTrackerGetUsedDefragTracker(void)
{
uint32_t idx = SC_ATOMIC_GET(defragtracker_prune_idx) % defrag_config.hash_size;
uint32_t cnt = defrag_config.hash_size;
while (cnt--) {
if (++idx >= defrag_config.hash_size)
idx = 0;
DefragTrackerHashRow *hb = &defragtracker_hash[idx];
if (DRLOCK_TRYLOCK(hb) != 0)
continue;
DefragTracker *dt = hb->tail;
if (dt == NULL) {
DRLOCK_UNLOCK(hb);
continue;
}
if (SCMutexTrylock(&dt->lock) != 0) {
DRLOCK_UNLOCK(hb);
continue;
}
/** never prune a tracker that is used by a packets
* we are currently processing in one of the threads */
if (SC_ATOMIC_GET(dt->use_cnt) > 0) {
DRLOCK_UNLOCK(hb);
SCMutexUnlock(&dt->lock);
continue;
}
/* remove from the hash */
if (dt->hprev != NULL)
dt->hprev->hnext = dt->hnext;
if (dt->hnext != NULL)
dt->hnext->hprev = dt->hprev;
if (hb->head == dt)
hb->head = dt->hnext;
if (hb->tail == dt)
hb->tail = dt->hprev;
dt->hnext = NULL;
dt->hprev = NULL;
DRLOCK_UNLOCK(hb);
DefragTrackerClearMemory(dt);
SCMutexUnlock(&dt->lock);
(void) SC_ATOMIC_ADD(defragtracker_prune_idx, (defrag_config.hash_size - cnt));
return dt;
}
return NULL;
}
| ./CrossVul/dataset_final_sorted/CWE-358/c/bad_3239_0 |
crossvul-cpp_data_bad_4196_0 | /*
* WindowsServiceControl.h - class for managing a Windows service
*
* Copyright (c) 2017-2020 Tobias Junghans <tobydox@veyon.io>
*
* This file is part of Veyon - https://veyon.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program (see COPYING); if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*/
#include "WindowsCoreFunctions.h"
#include "WindowsServiceControl.h"
WindowsServiceControl::WindowsServiceControl( const QString& name ) :
m_name( name ),
m_serviceManager( nullptr ),
m_serviceHandle( nullptr )
{
m_serviceManager = OpenSCManager( nullptr, nullptr, SC_MANAGER_ALL_ACCESS );
if( m_serviceManager )
{
m_serviceHandle = OpenService( m_serviceManager, WindowsCoreFunctions::toConstWCharArray( m_name ),
SERVICE_ALL_ACCESS );
if( m_serviceHandle == nullptr )
{
vCritical() << "could not open service" << m_name;
}
}
else
{
vCritical() << "the Service Control Manager could not be contacted - service " << m_name << "can't be controlled.";
}
}
WindowsServiceControl::~WindowsServiceControl()
{
CloseServiceHandle( m_serviceHandle );
CloseServiceHandle( m_serviceManager );
}
bool WindowsServiceControl::isRegistered()
{
return m_serviceHandle != nullptr;
}
bool WindowsServiceControl::isRunning()
{
if( checkService() == false )
{
return false;
}
SERVICE_STATUS status;
if( QueryServiceStatus( m_serviceHandle, &status ) )
{
return status.dwCurrentState == SERVICE_RUNNING;
}
return false;
}
bool WindowsServiceControl::start()
{
if( checkService() == false )
{
return false;
}
SERVICE_STATUS status;
status.dwCurrentState = SERVICE_START_PENDING;
if( StartService( m_serviceHandle, 0, nullptr ) )
{
while( QueryServiceStatus( m_serviceHandle, &status ) )
{
if( status.dwCurrentState == SERVICE_START_PENDING )
{
Sleep( 1000 );
}
else
{
break;
}
}
}
if( status.dwCurrentState != SERVICE_RUNNING )
{
vWarning() << "service" << m_name << "could not be started.";
return false;
}
return true;
}
bool WindowsServiceControl::stop()
{
if( checkService() == false )
{
return false;
}
SERVICE_STATUS status;
// Try to stop the service
if( ControlService( m_serviceHandle, SERVICE_CONTROL_STOP, &status ) )
{
while( QueryServiceStatus( m_serviceHandle, &status ) )
{
if( status.dwCurrentState == SERVICE_STOP_PENDING )
{
Sleep( 1000 );
}
else
{
break;
}
}
if( status.dwCurrentState != SERVICE_STOPPED )
{
vWarning() << "service" << m_name << "could not be stopped.";
return false;
}
}
return true;
}
bool WindowsServiceControl::install( const QString& filePath, const QString& displayName )
{
m_serviceHandle = CreateService(
m_serviceManager, // SCManager database
WindowsCoreFunctions::toConstWCharArray( m_name ), // name of service
WindowsCoreFunctions::toConstWCharArray( displayName ),// name to display
SERVICE_ALL_ACCESS, // desired access
SERVICE_WIN32_OWN_PROCESS,
// service type
SERVICE_AUTO_START, // start type
SERVICE_ERROR_NORMAL, // error control type
WindowsCoreFunctions::toConstWCharArray( filePath ), // service's binary
nullptr, // no load ordering group
nullptr, // no tag identifier
L"Tcpip\0RpcSs\0\0", // dependencies
nullptr, // LocalSystem account
nullptr ); // no password
if( m_serviceHandle == nullptr )
{
const auto error = GetLastError();
if( error == ERROR_SERVICE_EXISTS )
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" is already installed." ).arg( m_name ) );
}
else
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" could not be installed." ).arg( m_name ) );
}
return false;
}
SC_ACTION serviceActions;
serviceActions.Delay = 10000;
serviceActions.Type = SC_ACTION_RESTART;
SERVICE_FAILURE_ACTIONS serviceFailureActions;
serviceFailureActions.dwResetPeriod = 0;
serviceFailureActions.lpRebootMsg = nullptr;
serviceFailureActions.lpCommand = nullptr;
serviceFailureActions.lpsaActions = &serviceActions;
serviceFailureActions.cActions = 1;
ChangeServiceConfig2( m_serviceHandle, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailureActions );
// Everything went fine
vInfo() << qUtf8Printable( tr( "The service \"%1\" has been installed successfully." ).arg( m_name ) );
return true;
}
bool WindowsServiceControl::uninstall()
{
if( checkService() == false )
{
return false;
}
if( stop() == false )
{
return false;
}
if( DeleteService( m_serviceHandle ) == false )
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" could not be uninstalled." ).arg( m_name ) );
return false;
}
vInfo() << qUtf8Printable( tr( "The service \"%1\" has been uninstalled successfully." ).arg( m_name ) );
return true;
}
int WindowsServiceControl::startType()
{
if( checkService() == false )
{
return InvalidStartType;
}
LPQUERY_SERVICE_CONFIG serviceConfig{nullptr};
DWORD bufferSize = 0;
DWORD bytesNeeded = 0;
if( QueryServiceConfig( m_serviceHandle, nullptr, 0, &bytesNeeded ) == false )
{
if( GetLastError() == ERROR_INSUFFICIENT_BUFFER )
{
bufferSize = bytesNeeded;
serviceConfig = LPQUERY_SERVICE_CONFIG(LocalAlloc(LMEM_FIXED, bufferSize));
}
else
{
return InvalidStartType;
}
}
else
{
return InvalidStartType;
}
if( QueryServiceConfig( m_serviceHandle, serviceConfig, bufferSize, &bytesNeeded ) == false )
{
const auto error = GetLastError();
vCritical() << error;
LocalFree( serviceConfig );
return InvalidStartType;
}
const auto startType = serviceConfig->dwStartType;
LocalFree( serviceConfig );
return startType;
}
bool WindowsServiceControl::setStartType( int startType )
{
if( checkService() == false || startType == InvalidStartType )
{
return false;
}
if( ChangeServiceConfig( m_serviceHandle,
SERVICE_NO_CHANGE, // dwServiceType
static_cast<DWORD>( startType ),
SERVICE_NO_CHANGE, // dwErrorControl
nullptr, // lpBinaryPathName
nullptr, // lpLoadOrderGroup
nullptr, // lpdwTagId
nullptr, // lpDependencies
nullptr, // lpServiceStartName
nullptr, // lpPassword
nullptr // lpDisplayName
) == false )
{
vCritical() << qUtf8Printable( tr( "The start type of service \"%1\" could not be changed." ).arg( m_name ) );
return false;
}
return true;
}
bool WindowsServiceControl::checkService() const
{
if( m_serviceHandle == nullptr )
{
vCritical() << qUtf8Printable( tr( "Service \"%1\" could not be found." ).arg( m_name ) );
return false;
}
return true;
}
| ./CrossVul/dataset_final_sorted/CWE-428/cpp/bad_4196_0 |
crossvul-cpp_data_good_4196_0 | /*
* WindowsServiceControl.h - class for managing a Windows service
*
* Copyright (c) 2017-2020 Tobias Junghans <tobydox@veyon.io>
*
* This file is part of Veyon - https://veyon.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program (see COPYING); if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*/
#include "WindowsCoreFunctions.h"
#include "WindowsServiceControl.h"
WindowsServiceControl::WindowsServiceControl( const QString& name ) :
m_name( name ),
m_serviceManager( nullptr ),
m_serviceHandle( nullptr )
{
m_serviceManager = OpenSCManager( nullptr, nullptr, SC_MANAGER_ALL_ACCESS );
if( m_serviceManager )
{
m_serviceHandle = OpenService( m_serviceManager, WindowsCoreFunctions::toConstWCharArray( m_name ),
SERVICE_ALL_ACCESS );
if( m_serviceHandle == nullptr )
{
vCritical() << "could not open service" << m_name;
}
}
else
{
vCritical() << "the Service Control Manager could not be contacted - service " << m_name << "can't be controlled.";
}
}
WindowsServiceControl::~WindowsServiceControl()
{
CloseServiceHandle( m_serviceHandle );
CloseServiceHandle( m_serviceManager );
}
bool WindowsServiceControl::isRegistered()
{
return m_serviceHandle != nullptr;
}
bool WindowsServiceControl::isRunning()
{
if( checkService() == false )
{
return false;
}
SERVICE_STATUS status;
if( QueryServiceStatus( m_serviceHandle, &status ) )
{
return status.dwCurrentState == SERVICE_RUNNING;
}
return false;
}
bool WindowsServiceControl::start()
{
if( checkService() == false )
{
return false;
}
SERVICE_STATUS status;
status.dwCurrentState = SERVICE_START_PENDING;
if( StartService( m_serviceHandle, 0, nullptr ) )
{
while( QueryServiceStatus( m_serviceHandle, &status ) )
{
if( status.dwCurrentState == SERVICE_START_PENDING )
{
Sleep( 1000 );
}
else
{
break;
}
}
}
if( status.dwCurrentState != SERVICE_RUNNING )
{
vWarning() << "service" << m_name << "could not be started.";
return false;
}
return true;
}
bool WindowsServiceControl::stop()
{
if( checkService() == false )
{
return false;
}
SERVICE_STATUS status;
// Try to stop the service
if( ControlService( m_serviceHandle, SERVICE_CONTROL_STOP, &status ) )
{
while( QueryServiceStatus( m_serviceHandle, &status ) )
{
if( status.dwCurrentState == SERVICE_STOP_PENDING )
{
Sleep( 1000 );
}
else
{
break;
}
}
if( status.dwCurrentState != SERVICE_STOPPED )
{
vWarning() << "service" << m_name << "could not be stopped.";
return false;
}
}
return true;
}
bool WindowsServiceControl::install( const QString& filePath, const QString& displayName )
{
const auto binaryPath = QStringLiteral("\"%1\"").arg( QString( filePath ).replace( QLatin1Char('"'), QString() ) );
m_serviceHandle = CreateService(
m_serviceManager, // SCManager database
WindowsCoreFunctions::toConstWCharArray( m_name ), // name of service
WindowsCoreFunctions::toConstWCharArray( displayName ),// name to display
SERVICE_ALL_ACCESS, // desired access
SERVICE_WIN32_OWN_PROCESS,
// service type
SERVICE_AUTO_START, // start type
SERVICE_ERROR_NORMAL, // error control type
WindowsCoreFunctions::toConstWCharArray( binaryPath ), // service's binary
nullptr, // no load ordering group
nullptr, // no tag identifier
L"Tcpip\0RpcSs\0\0", // dependencies
nullptr, // LocalSystem account
nullptr ); // no password
if( m_serviceHandle == nullptr )
{
const auto error = GetLastError();
if( error == ERROR_SERVICE_EXISTS )
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" is already installed." ).arg( m_name ) );
}
else
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" could not be installed." ).arg( m_name ) );
}
return false;
}
SC_ACTION serviceActions;
serviceActions.Delay = 10000;
serviceActions.Type = SC_ACTION_RESTART;
SERVICE_FAILURE_ACTIONS serviceFailureActions;
serviceFailureActions.dwResetPeriod = 0;
serviceFailureActions.lpRebootMsg = nullptr;
serviceFailureActions.lpCommand = nullptr;
serviceFailureActions.lpsaActions = &serviceActions;
serviceFailureActions.cActions = 1;
ChangeServiceConfig2( m_serviceHandle, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailureActions );
// Everything went fine
vInfo() << qUtf8Printable( tr( "The service \"%1\" has been installed successfully." ).arg( m_name ) );
return true;
}
bool WindowsServiceControl::uninstall()
{
if( checkService() == false )
{
return false;
}
if( stop() == false )
{
return false;
}
if( DeleteService( m_serviceHandle ) == false )
{
vCritical() << qUtf8Printable( tr( "The service \"%1\" could not be uninstalled." ).arg( m_name ) );
return false;
}
vInfo() << qUtf8Printable( tr( "The service \"%1\" has been uninstalled successfully." ).arg( m_name ) );
return true;
}
int WindowsServiceControl::startType()
{
if( checkService() == false )
{
return InvalidStartType;
}
LPQUERY_SERVICE_CONFIG serviceConfig{nullptr};
DWORD bufferSize = 0;
DWORD bytesNeeded = 0;
if( QueryServiceConfig( m_serviceHandle, nullptr, 0, &bytesNeeded ) == false )
{
if( GetLastError() == ERROR_INSUFFICIENT_BUFFER )
{
bufferSize = bytesNeeded;
serviceConfig = LPQUERY_SERVICE_CONFIG(LocalAlloc(LMEM_FIXED, bufferSize));
}
else
{
return InvalidStartType;
}
}
else
{
return InvalidStartType;
}
if( QueryServiceConfig( m_serviceHandle, serviceConfig, bufferSize, &bytesNeeded ) == false )
{
const auto error = GetLastError();
vCritical() << error;
LocalFree( serviceConfig );
return InvalidStartType;
}
const auto startType = serviceConfig->dwStartType;
LocalFree( serviceConfig );
return startType;
}
bool WindowsServiceControl::setStartType( int startType )
{
if( checkService() == false || startType == InvalidStartType )
{
return false;
}
if( ChangeServiceConfig( m_serviceHandle,
SERVICE_NO_CHANGE, // dwServiceType
static_cast<DWORD>( startType ),
SERVICE_NO_CHANGE, // dwErrorControl
nullptr, // lpBinaryPathName
nullptr, // lpLoadOrderGroup
nullptr, // lpdwTagId
nullptr, // lpDependencies
nullptr, // lpServiceStartName
nullptr, // lpPassword
nullptr // lpDisplayName
) == false )
{
vCritical() << qUtf8Printable( tr( "The start type of service \"%1\" could not be changed." ).arg( m_name ) );
return false;
}
return true;
}
bool WindowsServiceControl::checkService() const
{
if( m_serviceHandle == nullptr )
{
vCritical() << qUtf8Printable( tr( "Service \"%1\" could not be found." ).arg( m_name ) );
return false;
}
return true;
}
| ./CrossVul/dataset_final_sorted/CWE-428/cpp/good_4196_0 |
crossvul-cpp_data_good_2263_0 | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
| Copyright (c) 1998-2010 Zend Technologies Ltd. (http://www.zend.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 2.00 of the Zend license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.zend.com/license/2_00.txt. |
| If you did not receive a copy of the Zend license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@zend.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/zend-string.h"
#include "hphp/runtime/base/zend-printf.h"
#include "hphp/runtime/base/zend-math.h"
#include "hphp/util/lock.h"
#include "hphp/util/overflow.h"
#include <math.h>
#include <monetary.h>
#include "hphp/runtime/base/bstring.h"
#include "hphp/runtime/base/exceptions.h"
#include "hphp/runtime/base/complex-types.h"
#include "hphp/runtime/base/string-buffer.h"
#include "hphp/runtime/base/runtime-error.h"
#include "hphp/runtime/base/type-conversions.h"
#include "hphp/runtime/base/string-util.h"
#include "hphp/runtime/base/builtin-functions.h"
#ifdef __APPLE__
#ifndef isnan
#define isnan(x) \
( sizeof (x) == sizeof(float ) ? __inline_isnanf((float)(x)) \
: sizeof (x) == sizeof(double) ? __inline_isnand((double)(x)) \
: __inline_isnan ((long double)(x)))
#endif
#ifndef isinf
#define isinf(x) \
( sizeof (x) == sizeof(float ) ? __inline_isinff((float)(x)) \
: sizeof (x) == sizeof(double) ? __inline_isinfd((double)(x)) \
: __inline_isinf ((long double)(x)))
#endif
#endif
#define PHP_QPRINT_MAXL 75
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
// helpers
bool string_substr_check(int len, int &f, int &l) {
if (l < 0 && -l > len) {
return false;
} else if (l > len) {
l = len;
}
if (f > len) {
return false;
} else if (f < 0 && -f > len) {
f = 0;
}
if (l < 0 && (l + len - f) < 0) {
return false;
}
// if "from" position is negative, count start position from the end
if (f < 0) {
f += len;
if (f < 0) {
f = 0;
}
}
if (f >= len) {
return false;
}
// if "length" position is negative, set it to the length
// needed to stop that many chars from the end of the string
if (l < 0) {
l += len - f;
if (l < 0) {
l = 0;
}
}
if ((unsigned int)f + (unsigned int)l > (unsigned int)len) {
l = len - f;
}
return true;
}
void string_charmask(const char *sinput, int len, char *mask) {
const unsigned char *input = (unsigned char *)sinput;
const unsigned char *end;
unsigned char c;
memset(mask, 0, 256);
for (end = input+len; input < end; input++) {
c=*input;
if ((input+3 < end) && input[1] == '.' && input[2] == '.'
&& input[3] >= c) {
memset(mask+c, 1, input[3] - c + 1);
input+=3;
} else if ((input+1 < end) && input[0] == '.' && input[1] == '.') {
/* Error, try to be as helpful as possible:
(a range ending/starting with '.' won't be captured here) */
if (end-len >= input) { /* there was no 'left' char */
throw_invalid_argument
("charlist: Invalid '..'-range, missing left of '..'");
continue;
}
if (input+2 >= end) { /* there is no 'right' char */
throw_invalid_argument
("charlist: Invalid '..'-range, missing right of '..'");
continue;
}
if (input[-1] > input[2]) { /* wrong order */
throw_invalid_argument
("charlist: '..'-range needs to be incrementing");
continue;
}
/* FIXME: better error (a..b..c is the only left possibility?) */
throw_invalid_argument("charlist: Invalid '..'-range");
continue;
} else {
mask[c]=1;
}
}
}
int string_copy(char *dst, const char *src, int siz) {
register char *d = dst;
register const char *s = src;
register size_t n = siz;
/* Copy as many bytes as will fit */
if (n != 0 && --n != 0) {
do {
if ((*d++ = *s++) == 0)
break;
} while (--n != 0);
}
/* Not enough room in dst, add NUL and traverse rest of src */
if (n == 0) {
if (siz != 0)
*d = '\0'; /* NUL-terminate dst */
while (*s++)
;
}
return(s - src - 1); /* count does not include NUL */
}
///////////////////////////////////////////////////////////////////////////////
// comparisons
int string_ncmp(const char *s1, const char *s2, int len) {
for (int i = 0; i < len; i++) {
char c1 = s1[i];
char c2 = s2[i];
if (c1 > c2) return 1;
if (c1 < c2) return -1;
}
return 0;
}
static int compare_right(char const **a, char const *aend,
char const **b, char const *bend) {
int bias = 0;
/* The longest run of digits wins. That aside, the greatest
value wins, but we can't know that it will until we've scanned
both numbers to know that they have the same magnitude, so we
remember it in BIAS. */
for(;; (*a)++, (*b)++) {
if ((*a == aend || !isdigit((int)(unsigned char)**a)) &&
(*b == bend || !isdigit((int)(unsigned char)**b)))
return bias;
else if (*a == aend || !isdigit((int)(unsigned char)**a))
return -1;
else if (*b == bend || !isdigit((int)(unsigned char)**b))
return +1;
else if (**a < **b) {
if (!bias)
bias = -1;
} else if (**a > **b) {
if (!bias)
bias = +1;
}
}
return 0;
}
static int compare_left(char const **a, char const *aend,
char const **b, char const *bend) {
/* Compare two left-aligned numbers: the first to have a
different value wins. */
for(;; (*a)++, (*b)++) {
if ((*a == aend || !isdigit((int)(unsigned char)**a)) &&
(*b == bend || !isdigit((int)(unsigned char)**b)))
return 0;
else if (*a == aend || !isdigit((int)(unsigned char)**a))
return -1;
else if (*b == bend || !isdigit((int)(unsigned char)**b))
return +1;
else if (**a < **b)
return -1;
else if (**a > **b)
return +1;
}
return 0;
}
int string_natural_cmp(char const *a, size_t a_len,
char const *b, size_t b_len, int fold_case) {
char ca, cb;
char const *ap, *bp;
char const *aend = a + a_len, *bend = b + b_len;
int fractional, result;
if (a_len == 0 || b_len == 0)
return a_len - b_len;
ap = a;
bp = b;
while (1) {
ca = *ap; cb = *bp;
/* skip over leading spaces or zeros */
while (isspace((int)(unsigned char)ca))
ca = *++ap;
while (isspace((int)(unsigned char)cb))
cb = *++bp;
/* process run of digits */
if (isdigit((int)(unsigned char)ca) && isdigit((int)(unsigned char)cb)) {
fractional = (ca == '0' || cb == '0');
if (fractional)
result = compare_left(&ap, aend, &bp, bend);
else
result = compare_right(&ap, aend, &bp, bend);
if (result != 0)
return result;
else if (ap == aend && bp == bend)
/* End of the strings. Let caller sort them out. */
return 0;
else {
/* Keep on comparing from the current point. */
ca = *ap; cb = *bp;
}
}
if (fold_case) {
ca = toupper((int)(unsigned char)ca);
cb = toupper((int)(unsigned char)cb);
}
if (ca < cb)
return -1;
else if (ca > cb)
return +1;
++ap; ++bp;
if (ap >= aend && bp >= bend)
/* The strings compare the same. Perhaps the caller
will want to call strcmp to break the tie. */
return 0;
else if (ap >= aend)
return -1;
else if (bp >= bend)
return 1;
}
}
///////////////////////////////////////////////////////////////////////////////
void string_to_case(String& s, int (*tocase)(int)) {
assert(!s.isNull());
assert(tocase);
auto data = s.bufferSlice().ptr;
auto len = s.size();
for (int i = 0; i < len; i++) {
data[i] = tocase(data[i]);
}
}
///////////////////////////////////////////////////////////////////////////////
#define STR_PAD_LEFT 0
#define STR_PAD_RIGHT 1
#define STR_PAD_BOTH 2
String string_pad(const char *input, int len, int pad_length,
const char *pad_string, int pad_str_len,
int pad_type) {
assert(input);
int num_pad_chars = pad_length - len;
/* If resulting string turns out to be shorter than input string,
we simply copy the input and return. */
if (pad_length < 0 || num_pad_chars < 0) {
return String(input, len, CopyString);
}
/* Setup the padding string values if specified. */
if (pad_str_len == 0) {
throw_invalid_argument("pad_string: (empty)");
return String();
}
String ret(pad_length, ReserveString);
char *result = ret.bufferSlice().ptr;
/* We need to figure out the left/right padding lengths. */
int left_pad, right_pad;
switch (pad_type) {
case STR_PAD_RIGHT:
left_pad = 0;
right_pad = num_pad_chars;
break;
case STR_PAD_LEFT:
left_pad = num_pad_chars;
right_pad = 0;
break;
case STR_PAD_BOTH:
left_pad = num_pad_chars / 2;
right_pad = num_pad_chars - left_pad;
break;
default:
throw_invalid_argument("pad_type: %d", pad_type);
return String();
}
/* First we pad on the left. */
int result_len = 0;
for (int i = 0; i < left_pad; i++) {
result[result_len++] = pad_string[i % pad_str_len];
}
/* Then we copy the input string. */
memcpy(result + result_len, input, len);
result_len += len;
/* Finally, we pad on the right. */
for (int i = 0; i < right_pad; i++) {
result[result_len++] = pad_string[i % pad_str_len];
}
ret.setSize(result_len);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
int string_find(const char *input, int len, char ch, int pos,
bool case_sensitive) {
assert(input);
if (pos < 0 || pos > len) {
return -1;
}
const void *ptr;
if (case_sensitive) {
ptr = memchr(input + pos, ch, len - pos);
} else {
ptr = bstrcasechr(input + pos, ch, len - pos);
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
int string_rfind(const char *input, int len, char ch, int pos,
bool case_sensitive) {
assert(input);
if (pos < -len || pos > len) {
return -1;
}
const void *ptr;
if (case_sensitive) {
if (pos >= 0) {
ptr = memrchr(input + pos, ch, len - pos);
} else {
ptr = memrchr(input, ch, len + pos + 1);
}
} else {
if (pos >= 0) {
ptr = bstrrcasechr(input + pos, ch, len - pos);
} else {
ptr = bstrrcasechr(input, ch, len + pos + 1);
}
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
int string_find(const char *input, int len, const char *s, int s_len,
int pos, bool case_sensitive) {
assert(input);
assert(s);
if (!s_len || pos < 0 || pos > len) {
return -1;
}
void *ptr;
if (case_sensitive) {
ptr = (void*)string_memnstr(input + pos, s, s_len, input + len);
} else {
ptr = bstrcasestr(input + pos, len - pos, s, s_len);
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
int string_rfind(const char *input, int len, const char *s, int s_len,
int pos, bool case_sensitive) {
assert(input);
assert(s);
if (!s_len || pos < -len || pos > len) {
return -1;
}
void *ptr;
if (case_sensitive) {
if (pos >= 0) {
ptr = bstrrstr(input + pos, len - pos, s, s_len);
} else {
ptr = bstrrstr(input, len + pos + s_len, s, s_len);
}
} else {
if (pos >= 0) {
ptr = bstrrcasestr(input + pos, len - pos, s, s_len);
} else {
ptr = bstrrcasestr(input, len + pos + s_len, s, s_len);
}
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
const char *string_memnstr(const char *haystack, const char *needle,
int needle_len, const char *end) {
const char *p = haystack;
char ne = needle[needle_len-1];
end -= needle_len;
while (p <= end) {
if ((p = (char *)memchr(p, *needle, (end-p+1))) && ne == p[needle_len-1]) {
if (!memcmp(needle, p, needle_len-1)) {
return p;
}
}
if (p == nullptr) {
return nullptr;
}
p++;
}
return nullptr;
}
String string_replace(const char *s, int len, int start, int length,
const char *replacement, int len_repl) {
assert(s);
assert(replacement);
assert(len >= 0);
// if "start" position is negative, count start position from the end
// of the string
if (start < 0) {
start = len + start;
if (start < 0) {
start = 0;
}
}
if (start > len) {
start = len;
}
// if "length" position is negative, set it to the length
// needed to stop that many chars from the end of the string
if (length < 0) {
length = (len - start) + length;
if (length < 0) {
length = 0;
}
}
// check if length is too large
if (length > len) {
length = len;
}
// check if the length is too large adjusting for non-zero start
// Write this way instead of start + length > len to avoid overflow
if (length > len - start) {
length = len - start;
}
String retString(len + len_repl - length, ReserveString);
char *ret = retString.bufferSlice().ptr;
int ret_len = 0;
if (start) {
memcpy(ret, s, start);
ret_len += start;
}
if (len_repl) {
memcpy(ret + ret_len, replacement, len_repl);
ret_len += len_repl;
}
len -= (start + length);
if (len) {
memcpy(ret + ret_len, s + start + length, len);
ret_len += len;
}
retString.setSize(ret_len);
return retString;
}
String string_replace(const char *input, int len,
const char *search, int len_search,
const char *replacement, int len_replace,
int &count, bool case_sensitive) {
assert(input);
assert(search && len_search);
assert(len >= 0);
assert(len_search >= 0);
assert(len_replace >= 0);
if (len == 0) {
return String();
}
smart::vector<int> founds;
founds.reserve(16);
if (len_search == 1) {
for (int pos = string_find(input, len, *search, 0, case_sensitive);
pos >= 0;
pos = string_find(input, len, *search, pos + len_search,
case_sensitive)) {
founds.push_back(pos);
}
} else {
for (int pos = string_find(input, len, search, len_search, 0,
case_sensitive);
pos >= 0;
pos = string_find(input, len, search, len_search,
pos + len_search, case_sensitive)) {
founds.push_back(pos);
}
}
count = founds.size();
if (count == 0) {
return String(); // not found
}
int reserve;
// Make sure the new size of the string wouldn't overflow int32_t. Don't
// bother if the replacement wouldn't make the string longer.
if (len_replace > len_search) {
auto raise = [&] { raise_error("String too large"); };
if (mul_overflow(len_replace - len_search, count)) {
raise();
}
int diff = (len_replace - len_search) * count;
if (add_overflow(len, diff)) {
raise();
}
reserve = len + diff;
} else {
reserve = len + (len_replace - len_search) * count;
}
String retString(reserve, ReserveString);
char *ret = retString.bufferSlice().ptr;
char *p = ret;
int pos = 0; // last position in input that hasn't been copied over yet
int n;
for (unsigned int i = 0; i < founds.size(); i++) {
n = founds[i];
if (n > pos) {
n -= pos;
memcpy(p, input, n);
p += n;
input += n;
pos += n;
}
if (len_replace) {
memcpy(p, replacement, len_replace);
p += len_replace;
}
input += len_search;
pos += len_search;
}
n = len;
if (n > pos) {
n -= pos;
memcpy(p, input, n);
p += n;
}
retString.setSize(p - ret);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
String string_chunk_split(const char *src, int srclen, const char *end,
int endlen, int chunklen) {
int chunks = srclen / chunklen; // complete chunks!
int restlen = srclen - chunks * chunklen; /* srclen % chunklen */
String ret(
safe_address(
chunks + 1,
endlen,
srclen
),
ReserveString
);
char *dest = ret.bufferSlice().ptr;
const char *p; char *q;
const char *pMax = src + srclen - chunklen + 1;
for (p = src, q = dest; p < pMax; ) {
memcpy(q, p, chunklen);
q += chunklen;
memcpy(q, end, endlen);
q += endlen;
p += chunklen;
}
if (restlen) {
memcpy(q, p, restlen);
q += restlen;
memcpy(q, end, endlen);
q += endlen;
}
ret.setSize(q - dest);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
#define PHP_TAG_BUF_SIZE 1023
/**
* Check if tag is in a set of tags
*
* states:
*
* 0 start tag
* 1 first non-whitespace char seen
*/
static int string_tag_find(const char *tag, int len, const char *set) {
char c, *n;
const char *t;
int state=0, done=0;
char *norm;
if (len <= 0) {
return 0;
}
norm = (char *)smart_malloc(len+1);
n = norm;
t = tag;
c = tolower(*t);
/*
normalize the tag removing leading and trailing whitespace
and turn any <a whatever...> into just <a> and any </tag>
into <tag>
*/
while (!done) {
switch (c) {
case '<':
*(n++) = c;
break;
case '>':
done =1;
break;
default:
if (!isspace((int)c)) {
if (state == 0) {
state=1;
}
if (c != '/') {
*(n++) = c;
}
} else {
if (state == 1)
done=1;
}
break;
}
c = tolower(*(++t));
}
*(n++) = '>';
*n = '\0';
if (strstr(set, norm)) {
done=1;
} else {
done=0;
}
smart_free(norm);
return done;
}
/**
* A simple little state-machine to strip out html and php tags
*
* State 0 is the output state, State 1 means we are inside a
* normal html tag and state 2 means we are inside a php tag.
*
* The state variable is passed in to allow a function like fgetss
* to maintain state across calls to the function.
*
* lc holds the last significant character read and br is a bracket
* counter.
*
* When an allow string is passed in we keep track of the string
* in state 1 and when the tag is closed check it against the
* allow string to see if we should allow it.
* swm: Added ability to strip <?xml tags without assuming it PHP
* code.
*/
String string_strip_tags(const char *s, const int len,
const char *allow, const int allow_len,
bool allow_tag_spaces) {
const char *abuf, *p;
char *rbuf, *tbuf, *tp, *rp, c, lc;
int br, i=0, depth=0, in_q = 0;
int state = 0, pos;
assert(s);
assert(allow);
String retString(s, len, CopyString);
rbuf = retString.bufferSlice().ptr;
String allowString;
c = *s;
lc = '\0';
p = s;
rp = rbuf;
br = 0;
if (allow_len) {
assert(allow);
allowString = String(allow_len, ReserveString);
char *atmp = allowString.bufferSlice().ptr;
for (const char *tmp = allow; *tmp; tmp++, atmp++) {
*atmp = tolower((int)*(const unsigned char *)tmp);
}
allowString.setSize(allow_len);
abuf = allowString.data();
tbuf = (char *)smart_malloc(PHP_TAG_BUF_SIZE+1);
tp = tbuf;
} else {
abuf = nullptr;
tbuf = tp = nullptr;
}
auto move = [&pos, &tbuf, &tp]() {
if (tp - tbuf >= PHP_TAG_BUF_SIZE) {
pos = tp - tbuf;
tbuf = (char*)smart_realloc(tbuf, (tp - tbuf) + PHP_TAG_BUF_SIZE + 1);
tp = tbuf + pos;
}
};
while (i < len) {
switch (c) {
case '\0':
break;
case '<':
if (isspace(*(p + 1)) && !allow_tag_spaces) {
goto reg_char;
}
if (state == 0) {
lc = '<';
state = 1;
if (allow_len) {
move();
*(tp++) = '<';
}
} else if (state == 1) {
depth++;
}
break;
case '(':
if (state == 2) {
if (lc != '"' && lc != '\'') {
lc = '(';
br++;
}
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
} else if (state == 0) {
*(rp++) = c;
}
break;
case ')':
if (state == 2) {
if (lc != '"' && lc != '\'') {
lc = ')';
br--;
}
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
} else if (state == 0) {
*(rp++) = c;
}
break;
case '>':
if (depth) {
depth--;
break;
}
if (in_q) {
break;
}
switch (state) {
case 1: /* HTML/XML */
lc = '>';
in_q = state = 0;
if (allow_len) {
move();
*(tp++) = '>';
*tp='\0';
if (string_tag_find(tbuf, tp-tbuf, abuf)) {
memcpy(rp, tbuf, tp-tbuf);
rp += tp-tbuf;
}
tp = tbuf;
}
break;
case 2: /* PHP */
if (!br && lc != '\"' && *(p-1) == '?') {
in_q = state = 0;
tp = tbuf;
}
break;
case 3:
in_q = state = 0;
tp = tbuf;
break;
case 4: /* JavaScript/CSS/etc... */
if (p >= s + 2 && *(p-1) == '-' && *(p-2) == '-') {
in_q = state = 0;
tp = tbuf;
}
break;
default:
*(rp++) = c;
break;
}
break;
case '"':
case '\'':
if (state == 4) {
/* Inside <!-- comment --> */
break;
} else if (state == 2 && *(p-1) != '\\') {
if (lc == c) {
lc = '\0';
} else if (lc != '\\') {
lc = c;
}
} else if (state == 0) {
*(rp++) = c;
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
}
if (state && p != s && *(p-1) != '\\' && (!in_q || *p == in_q)) {
if (in_q) {
in_q = 0;
} else {
in_q = *p;
}
}
break;
case '!':
/* JavaScript & Other HTML scripting languages */
if (state == 1 && *(p-1) == '<') {
state = 3;
lc = c;
} else {
if (state == 0) {
*(rp++) = c;
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
}
}
break;
case '-':
if (state == 3 && p >= s + 2 && *(p-1) == '-' && *(p-2) == '!') {
state = 4;
} else {
goto reg_char;
}
break;
case '?':
if (state == 1 && *(p-1) == '<') {
br=0;
state=2;
break;
}
case 'E':
case 'e':
/* !DOCTYPE exception */
if (state==3 && p > s+6
&& tolower(*(p-1)) == 'p'
&& tolower(*(p-2)) == 'y'
&& tolower(*(p-3)) == 't'
&& tolower(*(p-4)) == 'c'
&& tolower(*(p-5)) == 'o'
&& tolower(*(p-6)) == 'd') {
state = 1;
break;
}
/* fall-through */
case 'l':
/* swm: If we encounter '<?xml' then we shouldn't be in
* state == 2 (PHP). Switch back to HTML.
*/
if (state == 2 && p > s+2 && *(p-1) == 'm' && *(p-2) == 'x') {
state = 1;
break;
}
/* fall-through */
default:
reg_char:
if (state == 0) {
*(rp++) = c;
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
}
break;
}
c = *(++p);
i++;
}
if (rp < rbuf + len) {
*rp = '\0';
}
if (allow_len) {
smart_free(tbuf);
}
retString.setSize(rp - rbuf);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
String string_addslashes(const char *str, int length) {
assert(str);
if (length == 0) {
return String();
}
String retString((length << 1) + 1, ReserveString);
char *new_str = retString.bufferSlice().ptr;
const char *source = str;
const char *end = source + length;
char *target = new_str;
while (source < end) {
switch (*source) {
case '\0':
*target++ = '\\';
*target++ = '0';
break;
case '\'':
case '\"':
case '\\':
*target++ = '\\';
/* break is missing *intentionally* */
default:
*target++ = *source;
break;
}
source++;
}
retString.setSize(target - new_str);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
static char string_hex2int(int c) {
if (isdigit(c)) {
return c - '0';
}
if (c >= 'A' && c <= 'F') {
return c - 'A' + 10;
}
if (c >= 'a' && c <= 'f') {
return c - 'a' + 10;
}
return -1;
}
String string_quoted_printable_encode(const char *input, int len) {
size_t length = len;
const unsigned char *str = (unsigned char*)input;
unsigned long lp = 0;
unsigned char c;
char *d, *buffer;
char *hex = "0123456789ABCDEF";
String ret(
safe_address(
3,
length + ((safe_address(3, length, 0)/(PHP_QPRINT_MAXL-9)) + 1),
1),
ReserveString
);
d = buffer = ret.bufferSlice().ptr;
while (length--) {
if (((c = *str++) == '\015') && (*str == '\012') && length > 0) {
*d++ = '\015';
*d++ = *str++;
length--;
lp = 0;
} else {
if (iscntrl (c) || (c == 0x7f) || (c & 0x80) ||
(c == '=') || ((c == ' ') && (*str == '\015'))) {
if ((((lp+= 3) > PHP_QPRINT_MAXL) && (c <= 0x7f))
|| ((c > 0x7f) && (c <= 0xdf) && ((lp + 3) > PHP_QPRINT_MAXL))
|| ((c > 0xdf) && (c <= 0xef) && ((lp + 6) > PHP_QPRINT_MAXL))
|| ((c > 0xef) && (c <= 0xf4) && ((lp + 9) > PHP_QPRINT_MAXL))) {
*d++ = '=';
*d++ = '\015';
*d++ = '\012';
lp = 3;
}
*d++ = '=';
*d++ = hex[c >> 4];
*d++ = hex[c & 0xf];
} else {
if ((++lp) > PHP_QPRINT_MAXL) {
*d++ = '=';
*d++ = '\015';
*d++ = '\012';
lp = 1;
}
*d++ = c;
}
}
}
len = d - buffer;
ret.setSize(len);
return ret;
}
String string_quoted_printable_decode(const char *input, int len, bool is_q) {
assert(input);
if (len == 0) {
return String();
}
int i = 0, j = 0, k;
const char *str_in = input;
String ret(len, ReserveString);
char *str_out = ret.bufferSlice().ptr;
while (i < len && str_in[i]) {
switch (str_in[i]) {
case '=':
if (i + 2 < len && str_in[i + 1] && str_in[i + 2] &&
isxdigit((int) str_in[i + 1]) && isxdigit((int) str_in[i + 2]))
{
str_out[j++] = (string_hex2int((int) str_in[i + 1]) << 4)
+ string_hex2int((int) str_in[i + 2]);
i += 3;
} else /* check for soft line break according to RFC 2045*/ {
k = 1;
while (str_in[i + k] &&
((str_in[i + k] == 32) || (str_in[i + k] == 9))) {
/* Possibly, skip spaces/tabs at the end of line */
k++;
}
if (!str_in[i + k]) {
/* End of line reached */
i += k;
}
else if ((str_in[i + k] == 13) && (str_in[i + k + 1] == 10)) {
/* CRLF */
i += k + 2;
}
else if ((str_in[i + k] == 13) || (str_in[i + k] == 10)) {
/* CR or LF */
i += k + 1;
}
else {
str_out[j++] = str_in[i++];
}
}
break;
case '_':
if (is_q) {
str_out[j++] = ' ';
i++;
} else {
str_out[j++] = str_in[i++];
}
break;
default:
str_out[j++] = str_in[i++];
}
}
ret.setSize(j);
return ret;
}
Variant string_base_to_numeric(const char *s, int len, int base) {
int64_t num = 0;
double fnum = 0;
int mode = 0;
int64_t cutoff;
int cutlim;
assert(string_validate_base(base));
cutoff = LONG_MAX / base;
cutlim = LONG_MAX % base;
for (int i = len; i > 0; i--) {
char c = *s++;
/* might not work for EBCDIC */
if (c >= '0' && c <= '9')
c -= '0';
else if (c >= 'A' && c <= 'Z')
c -= 'A' - 10;
else if (c >= 'a' && c <= 'z')
c -= 'a' - 10;
else
continue;
if (c >= base)
continue;
switch (mode) {
case 0: /* Integer */
if (num < cutoff || (num == cutoff && c <= cutlim)) {
num = num * base + c;
break;
} else {
fnum = num;
mode = 1;
}
/* fall-through */
case 1: /* Float */
fnum = fnum * base + c;
}
}
if (mode == 1) {
return fnum;
}
return num;
}
String string_long_to_base(unsigned long value, int base) {
static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
char buf[(sizeof(unsigned long) << 3) + 1];
char *ptr, *end;
assert(string_validate_base(base));
end = ptr = buf + sizeof(buf) - 1;
do {
*--ptr = digits[value % base];
value /= base;
} while (ptr > buf && value);
return String(ptr, end - ptr, CopyString);
}
String string_numeric_to_base(const Variant& value, int base) {
static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
assert(string_validate_base(base));
if ((!value.isInteger() && !value.isDouble())) {
return empty_string();
}
if (value.isDouble()) {
double fvalue = floor(value.toDouble()); /* floor it just in case */
char *ptr, *end;
char buf[(sizeof(double) << 3) + 1];
/* Don't try to convert +/- infinity */
if (fvalue == HUGE_VAL || fvalue == -HUGE_VAL) {
raise_warning("Number too large");
return empty_string();
}
end = ptr = buf + sizeof(buf) - 1;
do {
*--ptr = digits[(int) fmod(fvalue, base)];
fvalue /= base;
} while (ptr > buf && fabs(fvalue) >= 1);
return String(ptr, end - ptr, CopyString);
}
return string_long_to_base(value.toInt64(), base);
}
///////////////////////////////////////////////////////////////////////////////
// uuencode
#define PHP_UU_ENC(c) \
((c) ? ((c) & 077) + ' ' : '`')
#define PHP_UU_ENC_C2(c) \
PHP_UU_ENC(((*(c) << 4) & 060) | ((*((c) + 1) >> 4) & 017))
#define PHP_UU_ENC_C3(c) \
PHP_UU_ENC(((*(c + 1) << 2) & 074) | ((*((c) + 2) >> 6) & 03))
#define PHP_UU_DEC(c) \
(((c) - ' ') & 077)
String string_uuencode(const char *src, int src_len) {
assert(src);
assert(src_len);
int len = 45;
char *p;
const char *s, *e, *ee;
char *dest;
/* encoded length is ~ 38% greater then the original */
String ret((int)ceil(src_len * 1.38) + 45, ReserveString);
p = dest = ret.bufferSlice().ptr;
s = src;
e = src + src_len;
while ((s + 3) < e) {
ee = s + len;
if (ee > e) {
ee = e;
len = ee - s;
if (len % 3) {
ee = s + (int) (floor(len / 3) * 3);
}
}
*p++ = PHP_UU_ENC(len);
while (s < ee) {
*p++ = PHP_UU_ENC(*s >> 2);
*p++ = PHP_UU_ENC_C2(s);
*p++ = PHP_UU_ENC_C3(s);
*p++ = PHP_UU_ENC(*(s + 2) & 077);
s += 3;
}
if (len == 45) {
*p++ = '\n';
}
}
if (s < e) {
if (len == 45) {
*p++ = PHP_UU_ENC(e - s);
len = 0;
}
*p++ = PHP_UU_ENC(*s >> 2);
*p++ = PHP_UU_ENC_C2(s);
*p++ = ((e - s) > 1) ? PHP_UU_ENC_C3(s) : PHP_UU_ENC('\0');
*p++ = ((e - s) > 2) ? PHP_UU_ENC(*(s + 2) & 077) : PHP_UU_ENC('\0');
}
if (len < 45) {
*p++ = '\n';
}
*p++ = PHP_UU_ENC('\0');
*p++ = '\n';
*p = '\0';
ret.setSize(p - dest);
return ret;
}
String string_uudecode(const char *src, int src_len) {
int total_len = 0;
int len;
const char *s, *e, *ee;
char *p, *dest;
String ret(ceil(src_len * 0.75), ReserveString);
p = dest = ret.bufferSlice().ptr;
s = src;
e = src + src_len;
while (s < e) {
if ((len = PHP_UU_DEC(*s++)) <= 0) {
break;
}
/* sanity check */
if (len > src_len) {
goto err;
}
total_len += len;
ee = s + (len == 45 ? 60 : (int) floor(len * 1.33));
/* sanity check */
if (ee > e) {
goto err;
}
while (s < ee) {
if (s + 4 > e) goto err;
*p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4;
*p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2;
*p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3));
s += 4;
}
if (len < 45) {
break;
}
/* skip \n */
s++;
}
if ((len = total_len > (p - dest))) {
*p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4;
if (len > 1) {
*p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2;
if (len > 2) {
*p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3));
}
}
}
ret.setSize(total_len);
return ret;
err:
return String();
}
///////////////////////////////////////////////////////////////////////////////
// base64
static const char base64_table[] = {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', '\0'
};
static const char base64_pad = '=';
static const short base64_reverse_table[256] = {
-2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -2, -2, -1, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 62, -2, -2, -2, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -2, -2, -2, -2, -2, -2,
-2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -2, -2, -2, -2, -2,
-2, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2
};
static String php_base64_encode(const unsigned char *str, int length) {
const unsigned char *current = str;
unsigned char *p;
unsigned char *result;
if ((length + 2) < 0 || ((length + 2) / 3) >= (1 << (sizeof(int) * 8 - 2))) {
return String();
}
String ret(((length + 2) / 3) * 4, ReserveString);
p = result = (unsigned char *)ret.bufferSlice().ptr;
while (length > 2) { /* keep going until we have less than 24 bits */
*p++ = base64_table[current[0] >> 2];
*p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)];
*p++ = base64_table[((current[1] & 0x0f) << 2) + (current[2] >> 6)];
*p++ = base64_table[current[2] & 0x3f];
current += 3;
length -= 3; /* we just handle 3 octets of data */
}
/* now deal with the tail end of things */
if (length != 0) {
*p++ = base64_table[current[0] >> 2];
if (length > 1) {
*p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)];
*p++ = base64_table[(current[1] & 0x0f) << 2];
*p++ = base64_pad;
} else {
*p++ = base64_table[(current[0] & 0x03) << 4];
*p++ = base64_pad;
*p++ = base64_pad;
}
}
ret.setSize(p - result);
return ret;
}
static String php_base64_decode(const char *str, int length, bool strict) {
const unsigned char *current = (unsigned char*)str;
int ch, i = 0, j = 0, k;
/* this sucks for threaded environments */
String retString(length, ReserveString);
unsigned char* result = (unsigned char*)retString.bufferSlice().ptr;
/* run through the whole string, converting as we go */
while ((ch = *current++) != '\0' && length-- > 0) {
if (ch == base64_pad) {
if (*current != '=' && ((i % 4) == 1 || (strict && length > 0))) {
if ((i % 4) != 1) {
while (isspace(*(++current))) {
continue;
}
if (*current == '\0') {
continue;
}
}
return String();
}
continue;
}
ch = base64_reverse_table[ch];
if ((!strict && ch < 0) || ch == -1) {
/* a space or some other separator character, we simply skip over */
continue;
} else if (ch == -2) {
return String();
}
switch(i % 4) {
case 0:
result[j] = ch << 2;
break;
case 1:
result[j++] |= ch >> 4;
result[j] = (ch & 0x0f) << 4;
break;
case 2:
result[j++] |= ch >>2;
result[j] = (ch & 0x03) << 6;
break;
case 3:
result[j++] |= ch;
break;
}
i++;
}
k = j;
/* mop things up if we ended on a boundary */
if (ch == base64_pad) {
switch(i % 4) {
case 1:
return String();
case 2:
k++;
case 3:
result[k] = 0;
}
}
retString.setSize(j);
return retString;
}
String string_base64_encode(const char *input, int len) {
return php_base64_encode((unsigned char *)input, len);
}
String string_base64_decode(const char *input, int len, bool strict) {
return php_base64_decode(input, len, strict);
}
///////////////////////////////////////////////////////////////////////////////
String string_escape_shell_arg(const char *str) {
int x, y, l;
char *cmd;
y = 0;
l = strlen(str);
String ret(safe_address(l, 4, 3), ReserveString); /* worst case */
cmd = ret.bufferSlice().ptr;
cmd[y++] = '\'';
for (x = 0; x < l; x++) {
switch (str[x]) {
case '\'':
cmd[y++] = '\'';
cmd[y++] = '\\';
cmd[y++] = '\'';
/* fall-through */
default:
cmd[y++] = str[x];
}
}
cmd[y++] = '\'';
ret.setSize(y);
return ret;
}
String string_escape_shell_cmd(const char *str) {
register int x, y, l;
char *cmd;
char *p = nullptr;
l = strlen(str);
String ret(safe_address(l, 2, 1), ReserveString);
cmd = ret.bufferSlice().ptr;
for (x = 0, y = 0; x < l; x++) {
switch (str[x]) {
case '"':
case '\'':
if (!p && (p = (char *)memchr(str + x + 1, str[x], l - x - 1))) {
/* noop */
} else if (p && *p == str[x]) {
p = nullptr;
} else {
cmd[y++] = '\\';
}
cmd[y++] = str[x];
break;
case '#': /* This is character-set independent */
case '&':
case ';':
case '`':
case '|':
case '*':
case '?':
case '~':
case '<':
case '>':
case '^':
case '(':
case ')':
case '[':
case ']':
case '{':
case '}':
case '$':
case '\\':
case '\x0A': /* excluding these two */
case '\xFF':
cmd[y++] = '\\';
/* fall-through */
default:
cmd[y++] = str[x];
}
}
ret.setSize(y);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
static void string_similar_str(const char *txt1, int len1,
const char *txt2, int len2,
int *pos1, int *pos2, int *max) {
const char *p, *q;
const char *end1 = txt1 + len1;
const char *end2 = txt2 + len2;
int l;
*max = 0;
for (p = txt1; p < end1; p++) {
for (q = txt2; q < end2; q++) {
for (l = 0; (p + l < end1) && (q + l < end2) && (p[l] == q[l]); l++);
if (l > *max) {
*max = l;
*pos1 = p - txt1;
*pos2 = q - txt2;
}
}
}
}
static int string_similar_char(const char *txt1, int len1,
const char *txt2, int len2) {
int sum;
int pos1 = 0, pos2 = 0, max;
string_similar_str(txt1, len1, txt2, len2, &pos1, &pos2, &max);
if ((sum = max)) {
if (pos1 && pos2) {
sum += string_similar_char(txt1, pos1, txt2, pos2);
}
if ((pos1 + max < len1) && (pos2 + max < len2)) {
sum += string_similar_char(txt1 + pos1 + max, len1 - pos1 - max,
txt2 + pos2 + max, len2 - pos2 - max);
}
}
return sum;
}
int string_similar_text(const char *t1, int len1,
const char *t2, int len2, float *percent) {
if (len1 == 0 && len2 == 0) {
if (percent) *percent = 0.0;
return 0;
}
int sim = string_similar_char(t1, len1, t2, len2);
if (percent) *percent = sim * 200.0 / (len1 + len2);
return sim;
}
///////////////////////////////////////////////////////////////////////////////
#define LEVENSHTEIN_MAX_LENTH 255
// reference implementation, only optimized for memory usage, not speed
int string_levenshtein(const char *s1, int l1, const char *s2, int l2,
int cost_ins, int cost_rep, int cost_del ) {
int *p1, *p2, *tmp;
int i1, i2, c0, c1, c2;
if (l1==0) return l2*cost_ins;
if (l2==0) return l1*cost_del;
if ((l1>LEVENSHTEIN_MAX_LENTH)||(l2>LEVENSHTEIN_MAX_LENTH)) {
raise_warning("levenshtein(): Argument string(s) too long");
return -1;
}
p1 = (int*)smart_malloc((l2+1) * sizeof(int));
p2 = (int*)smart_malloc((l2+1) * sizeof(int));
for(i2=0;i2<=l2;i2++) {
p1[i2] = i2*cost_ins;
}
for(i1=0;i1<l1;i1++) {
p2[0]=p1[0]+cost_del;
for(i2=0;i2<l2;i2++) {
c0=p1[i2]+((s1[i1]==s2[i2])?0:cost_rep);
c1=p1[i2+1]+cost_del; if (c1<c0) c0=c1;
c2=p2[i2]+cost_ins; if (c2<c0) c0=c2;
p2[i2+1]=c0;
}
tmp=p1; p1=p2; p2=tmp;
}
c0=p1[l2];
smart_free(p1);
smart_free(p2);
return c0;
}
///////////////////////////////////////////////////////////////////////////////
String string_money_format(const char *format, double value) {
bool check = false;
const char *p = format;
while ((p = strchr(p, '%'))) {
if (*(p + 1) == '%') {
p += 2;
} else if (!check) {
check = true;
p++;
} else {
throw_invalid_argument
("format: Only a single %%i or %%n token can be used");
return String();
}
}
int format_len = strlen(format);
int str_len = safe_address(format_len, 1, 1024);
String ret(str_len, ReserveString);
char *str = ret.bufferSlice().ptr;
if ((str_len = strfmon(str, str_len, format, value)) < 0) {
return String();
}
ret.setSize(str_len);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
String string_number_format(double d, int dec,
const String& dec_point,
const String& thousand_sep) {
char *tmpbuf = nullptr, *resbuf;
char *s, *t; /* source, target */
char *dp;
int integral;
int tmplen, reslen=0;
int count=0;
int is_negative=0;
if (d < 0) {
is_negative = 1;
d = -d;
}
if (dec < 0) dec = 0;
d = php_math_round(d, dec);
// departure from PHP: we got rid of dependencies on spprintf() here.
String tmpstr(63, ReserveString);
tmpbuf = tmpstr.bufferSlice().ptr;
snprintf(tmpbuf, 64, "%.*F", dec, d);
tmplen = strlen(tmpbuf);
if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) {
tmpstr.setSize(tmplen);
return tmpstr;
}
/* find decimal point, if expected */
if (dec) {
dp = strpbrk(tmpbuf, ".,");
} else {
dp = nullptr;
}
/* calculate the length of the return buffer */
if (dp) {
integral = dp - tmpbuf;
} else {
/* no decimal point was found */
integral = tmplen;
}
/* allow for thousand separators */
if (!thousand_sep.empty()) {
integral += ((integral-1) / 3) * thousand_sep.size();
}
reslen = integral;
if (dec) {
reslen += dec;
if (!dec_point.empty()) {
reslen += dec_point.size();
}
}
/* add a byte for minus sign */
if (is_negative) {
reslen++;
}
String resstr(reslen, ReserveString);
resbuf = resstr.bufferSlice().ptr;
s = tmpbuf+tmplen-1;
t = resbuf+reslen-1;
/* copy the decimal places.
* Take care, as the sprintf implementation may return less places than
* we requested due to internal buffer limitations */
if (dec) {
int declen = dp ? s - dp : 0;
int topad = dec > declen ? dec - declen : 0;
/* pad with '0's */
while (topad--) {
*t-- = '0';
}
if (dp) {
s -= declen + 1; /* +1 to skip the point */
t -= declen;
/* now copy the chars after the point */
memcpy(t + 1, dp + 1, declen);
}
/* add decimal point */
if (!dec_point.empty()) {
memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size());
t -= dec_point.size();
}
}
/* copy the numbers before the decimal point, adding thousand
* separator every three digits */
while(s >= tmpbuf) {
*t-- = *s--;
if (thousand_sep && (++count%3)==0 && s>=tmpbuf) {
memcpy(t + (1 - thousand_sep.size()),
thousand_sep.data(),
thousand_sep.size());
t -= thousand_sep.size();
}
}
/* and a minus sign, if needed */
if (is_negative) {
*t-- = '-';
}
resstr.setSize(reslen);
return resstr;
}
///////////////////////////////////////////////////////////////////////////////
// soundex
/* Simple soundex algorithm as described by Knuth in TAOCP, vol 3 */
String string_soundex(const String& str) {
assert(!str.empty());
int _small, code, last;
String retString(4, ReserveString);
char* soundex = retString.bufferSlice().ptr;
static char soundex_table[26] = {
0, /* A */
'1', /* B */
'2', /* C */
'3', /* D */
0, /* E */
'1', /* F */
'2', /* G */
0, /* H */
0, /* I */
'2', /* J */
'2', /* K */
'4', /* L */
'5', /* M */
'5', /* N */
0, /* O */
'1', /* P */
'2', /* Q */
'6', /* R */
'2', /* S */
'3', /* T */
0, /* U */
'1', /* V */
0, /* W */
'2', /* X */
0, /* Y */
'2' /* Z */
};
/* build soundex string */
last = -1;
const char *p = str.slice().ptr;
for (_small = 0; *p && _small < 4; p++) {
/* convert chars to upper case and strip non-letter chars */
/* BUG: should also map here accented letters used in non */
/* English words or names (also found in English text!): */
/* esstsett, thorn, n-tilde, c-cedilla, s-caron, ... */
code = toupper((int)(unsigned char)(*p));
if (code >= 'A' && code <= 'Z') {
if (_small == 0) {
/* remember first valid char */
soundex[_small++] = code;
last = soundex_table[code - 'A'];
} else {
/* ignore sequences of consonants with same soundex */
/* code in trail, and vowels unless they separate */
/* consonant letters */
code = soundex_table[code - 'A'];
if (code != last) {
if (code != 0) {
soundex[_small++] = code;
}
last = code;
}
}
}
}
/* pad with '0' and terminate with 0 ;-) */
while (_small < 4) {
soundex[_small++] = '0';
}
retString.setSize(4);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
// metaphone
/**
* this is now the original code by Michael G Schwern:
* i've changed it just a slightly bit (use emalloc,
* get rid of includes etc)
* - thies - 13.09.1999
*/
/*----------------------------- */
/* this used to be "metaphone.h" */
/*----------------------------- */
/* Special encodings */
#define SH 'X'
#define TH '0'
/*----------------------------- */
/* end of "metaphone.h" */
/*----------------------------- */
/*----------------------------- */
/* this used to be "metachar.h" */
/*----------------------------- */
/* Metachar.h ... little bits about characters for metaphone */
/*-- Character encoding array & accessing macros --*/
/* Stolen directly out of the book... */
char _codes[26] = { 1,16,4,16,9,2,4,16,9,2,0,2,2,2,1,4,0,2,4,4,1,0,0,0,8,0};
#define ENCODE(c) (isalpha(c) ? _codes[((toupper(c)) - 'A')] : 0)
#define isvowel(c) (ENCODE(c) & 1) /* AEIOU */
/* These letters are passed through unchanged */
#define NOCHANGE(c) (ENCODE(c) & 2) /* FJMNR */
/* These form dipthongs when preceding H */
#define AFFECTH(c) (ENCODE(c) & 4) /* CGPST */
/* These make C and G soft */
#define MAKESOFT(c) (ENCODE(c) & 8) /* EIY */
/* These prevent GH from becoming F */
#define NOGHTOF(c) (ENCODE(c) & 16) /* BDH */
/*----------------------------- */
/* end of "metachar.h" */
/*----------------------------- */
/* I suppose I could have been using a character pointer instead of
* accesssing the array directly... */
/* Look at the next letter in the word */
#define Next_Letter ((char)toupper(word[w_idx+1]))
/* Look at the current letter in the word */
#define Curr_Letter ((char)toupper(word[w_idx]))
/* Go N letters back. */
#define Look_Back_Letter(n) (w_idx >= n ? (char)toupper(word[w_idx-n]) : '\0')
/* Previous letter. I dunno, should this return null on failure? */
#define Prev_Letter (Look_Back_Letter(1))
/* Look two letters down. It makes sure you don't walk off the string. */
#define After_Next_Letter (Next_Letter != '\0' ? (char)toupper(word[w_idx+2]) \
: '\0')
#define Look_Ahead_Letter(n) ((char)toupper(Lookahead(word+w_idx, n)))
/* Allows us to safely look ahead an arbitrary # of letters */
/* I probably could have just used strlen... */
static char Lookahead(unsigned char *word, int how_far) {
char letter_ahead = '\0'; /* null by default */
int idx;
for (idx = 0; word[idx] != '\0' && idx < how_far; idx++);
/* Edge forward in the string... */
letter_ahead = (char)word[idx]; /* idx will be either == to how_far or
* at the end of the string
*/
return letter_ahead;
}
/* phonize one letter
* We don't know the buffers size in advance. On way to solve this is to just
* re-allocate the buffer size. We're using an extra of 2 characters (this
* could be one though; or more too). */
#define Phonize(c) { buffer.append(c); }
/* How long is the phoned word? */
#define Phone_Len (buffer.size())
/* Note is a letter is a 'break' in the word */
#define Isbreak(c) (!isalpha(c))
String string_metaphone(const char *input, int word_len, long max_phonemes,
int traditional) {
unsigned char *word = (unsigned char *)input;
int w_idx = 0; /* point in the phonization we're at. */
int max_buffer_len = 0; /* maximum length of the destination buffer */
/*-- Parameter checks --*/
/* Negative phoneme length is meaningless */
if (max_phonemes < 0)
return String();
/* Empty/null string is meaningless */
/* Overly paranoid */
/* always_assert(word != NULL && word[0] != '\0'); */
if (word == nullptr)
return String();
/*-- Allocate memory for our phoned_phrase --*/
if (max_phonemes == 0) { /* Assume largest possible */
max_buffer_len = word_len;
} else {
max_buffer_len = max_phonemes;
}
StringBuffer buffer(max_buffer_len);
/*-- The first phoneme has to be processed specially. --*/
/* Find our first letter */
for (; !isalpha(Curr_Letter); w_idx++) {
/* On the off chance we were given nothing but crap... */
if (Curr_Letter == '\0') {
return buffer.detach(); /* For testing */
}
}
switch (Curr_Letter) {
/* AE becomes E */
case 'A':
if (Next_Letter == 'E') {
Phonize('E');
w_idx += 2;
}
/* Remember, preserve vowels at the beginning */
else {
Phonize('A');
w_idx++;
}
break;
/* [GKP]N becomes N */
case 'G':
case 'K':
case 'P':
if (Next_Letter == 'N') {
Phonize('N');
w_idx += 2;
}
break;
/* WH becomes H,
WR becomes R
W if followed by a vowel */
case 'W':
if (Next_Letter == 'H' ||
Next_Letter == 'R') {
Phonize(Next_Letter);
w_idx += 2;
} else if (isvowel(Next_Letter)) {
Phonize('W');
w_idx += 2;
}
/* else ignore */
break;
/* X becomes S */
case 'X':
Phonize('S');
w_idx++;
break;
/* Vowels are kept */
/* We did A already
case 'A':
case 'a':
*/
case 'E':
case 'I':
case 'O':
case 'U':
Phonize(Curr_Letter);
w_idx++;
break;
default:
/* do nothing */
break;
}
/* On to the metaphoning */
for (; Curr_Letter != '\0' &&
(max_phonemes == 0 || Phone_Len < max_phonemes);
w_idx++) {
/* How many letters to skip because an eariler encoding handled
* multiple letters */
unsigned short int skip_letter = 0;
/* THOUGHT: It would be nice if, rather than having things like...
* well, SCI. For SCI you encode the S, then have to remember
* to skip the C. So the phonome SCI invades both S and C. It would
* be better, IMHO, to skip the C from the S part of the encoding.
* Hell, I'm trying it.
*/
/* Ignore non-alphas */
if (!isalpha(Curr_Letter))
continue;
/* Drop duplicates, except CC */
if (Curr_Letter == Prev_Letter &&
Curr_Letter != 'C')
continue;
switch (Curr_Letter) {
/* B -> B unless in MB */
case 'B':
if (Prev_Letter != 'M')
Phonize('B');
break;
/* 'sh' if -CIA- or -CH, but not SCH, except SCHW.
* (SCHW is handled in S)
* S if -CI-, -CE- or -CY-
* dropped if -SCI-, SCE-, -SCY- (handed in S)
* else K
*/
case 'C':
if (MAKESOFT(Next_Letter)) { /* C[IEY] */
if (After_Next_Letter == 'A' &&
Next_Letter == 'I') { /* CIA */
Phonize(SH);
}
/* SC[IEY] */
else if (Prev_Letter == 'S') {
/* Dropped */
} else {
Phonize('S');
}
} else if (Next_Letter == 'H') {
if ((!traditional) && (After_Next_Letter == 'R' ||
Prev_Letter == 'S')) { /* Christ, School */
Phonize('K');
} else {
Phonize(SH);
}
skip_letter++;
} else {
Phonize('K');
}
break;
/* J if in -DGE-, -DGI- or -DGY-
* else T
*/
case 'D':
if (Next_Letter == 'G' && MAKESOFT(After_Next_Letter)) {
Phonize('J');
skip_letter++;
} else
Phonize('T');
break;
/* F if in -GH and not B--GH, D--GH, -H--GH, -H---GH
* else dropped if -GNED, -GN,
* else dropped if -DGE-, -DGI- or -DGY- (handled in D)
* else J if in -GE-, -GI, -GY and not GG
* else K
*/
case 'G':
if (Next_Letter == 'H') {
if (!(NOGHTOF(Look_Back_Letter(3)) || Look_Back_Letter(4) == 'H')) {
Phonize('F');
skip_letter++;
} else {
/* silent */
}
} else if (Next_Letter == 'N') {
if (Isbreak(After_Next_Letter) ||
(After_Next_Letter == 'E' && Look_Ahead_Letter(3) == 'D')) {
/* dropped */
} else
Phonize('K');
} else if (MAKESOFT(Next_Letter) && Prev_Letter != 'G') {
Phonize('J');
} else {
Phonize('K');
}
break;
/* H if before a vowel and not after C,G,P,S,T */
case 'H':
if (isvowel(Next_Letter) && !AFFECTH(Prev_Letter))
Phonize('H');
break;
/* dropped if after C
* else K
*/
case 'K':
if (Prev_Letter != 'C')
Phonize('K');
break;
/* F if before H
* else P
*/
case 'P':
if (Next_Letter == 'H') {
Phonize('F');
} else {
Phonize('P');
}
break;
/* K
*/
case 'Q':
Phonize('K');
break;
/* 'sh' in -SH-, -SIO- or -SIA- or -SCHW-
* else S
*/
case 'S':
if (Next_Letter == 'I' &&
(After_Next_Letter == 'O' || After_Next_Letter == 'A')) {
Phonize(SH);
} else if (Next_Letter == 'H') {
Phonize(SH);
skip_letter++;
} else if ((!traditional) &&
(Next_Letter == 'C' && Look_Ahead_Letter(2) == 'H' &&
Look_Ahead_Letter(3) == 'W')) {
Phonize(SH);
skip_letter += 2;
} else {
Phonize('S');
}
break;
/* 'sh' in -TIA- or -TIO-
* else 'th' before H
* else T
*/
case 'T':
if (Next_Letter == 'I' &&
(After_Next_Letter == 'O' || After_Next_Letter == 'A')) {
Phonize(SH);
} else if (Next_Letter == 'H') {
Phonize(TH);
skip_letter++;
} else {
Phonize('T');
}
break;
/* F */
case 'V':
Phonize('F');
break;
/* W before a vowel, else dropped */
case 'W':
if (isvowel(Next_Letter))
Phonize('W');
break;
/* KS */
case 'X':
Phonize('K');
Phonize('S');
break;
/* Y if followed by a vowel */
case 'Y':
if (isvowel(Next_Letter))
Phonize('Y');
break;
/* S */
case 'Z':
Phonize('S');
break;
/* No transformation */
case 'F':
case 'J':
case 'L':
case 'M':
case 'N':
case 'R':
Phonize(Curr_Letter);
break;
default:
/* nothing */
break;
} /* END SWITCH */
w_idx += skip_letter;
} /* END FOR */
return buffer.detach();
}
///////////////////////////////////////////////////////////////////////////////
// Cyrillic
/**
* This is codetables for different Cyrillic charsets (relative to koi8-r).
* Each table contains data for 128-255 symbols from ASCII table.
* First 256 symbols are for conversion from koi8-r to corresponding charset,
* second 256 symbols are for reverse conversion, from charset to koi8-r.
*
* Here we have the following tables:
* _cyr_win1251 - for windows-1251 charset
* _cyr_iso88595 - for iso8859-5 charset
* _cyr_cp866 - for x-cp866 charset
* _cyr_mac - for x-mac-cyrillic charset
*/
typedef unsigned char _cyr_charset_table[512];
static const _cyr_charset_table _cyr_win1251 = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,
46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,
154,174,190,46,159,189,46,46,179,191,180,157,46,46,156,183,
46,46,182,166,173,46,46,158,163,152,164,155,46,46,46,167,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,184,186,32,179,191,32,32,32,32,32,180,162,32,
32,32,32,168,170,32,178,175,32,32,32,32,32,165,161,169,
254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238,
239,255,240,241,242,243,230,226,252,251,231,248,253,249,247,250,
222,192,193,214,196,197,212,195,213,200,201,202,203,204,205,206,
207,223,208,209,210,211,198,194,220,219,199,216,221,217,215,218,
};
static const _cyr_charset_table _cyr_cp866 = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
35,35,35,124,124,124,124,43,43,124,124,43,43,43,43,43,
43,45,45,124,45,43,124,124,43,43,45,45,124,45,43,45,
45,45,45,43,43,43,43,43,43,43,43,35,35,124,124,35,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209,
179,163,180,164,183,167,190,174,32,149,158,32,152,159,148,154,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
205,186,213,241,243,201,32,245,187,212,211,200,190,32,247,198,
199,204,181,240,242,185,32,244,203,207,208,202,216,32,246,32,
238,160,161,230,164,165,228,163,229,168,169,170,171,172,173,174,
175,239,224,225,226,227,166,162,236,235,167,232,237,233,231,234,
158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142,
143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154,
};
static const _cyr_charset_table _cyr_iso88595 = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,179,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209,
32,163,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,241,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,161,32,32,32,32,32,32,32,32,32,32,32,32,
238,208,209,230,212,213,228,211,229,216,217,218,219,220,221,222,
223,239,224,225,226,227,214,210,236,235,215,232,237,233,231,234,
206,176,177,198,180,181,196,179,197,184,185,186,187,188,189,190,
191,207,192,193,194,195,182,178,204,203,183,200,205,201,199,202,
};
static const _cyr_charset_table _cyr_mac = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
144,145,146,147,148,149,150,151,152,153,154,155,156,179,163,209,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,255,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
160,161,162,222,164,165,166,167,168,169,170,171,172,173,174,175,
176,177,178,221,180,181,182,183,184,185,186,187,188,189,190,191,
254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238,
239,223,240,241,242,243,230,226,252,251,231,248,253,249,247,250,
158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142,
143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154,
};
/**
* This is the function that performs real in-place conversion of the string
* between charsets.
* Parameters:
* str - string to be converted
* from,to - one-symbol label of source and destination charset
* The following symbols are used as labels:
* k - koi8-r
* w - windows-1251
* i - iso8859-5
* a - x-cp866
* d - x-cp866
* m - x-mac-cyrillic
*/
String string_convert_cyrillic_string(const String& input, char from, char to) {
const unsigned char *from_table, *to_table;
unsigned char tmp;
const unsigned char *uinput = (unsigned char *)input.slice().ptr;
String retString(input.size(), ReserveString);
unsigned char *str = (unsigned char *)retString.bufferSlice().ptr;
from_table = nullptr;
to_table = nullptr;
switch (toupper((int)(unsigned char)from)) {
case 'W': from_table = _cyr_win1251; break;
case 'A':
case 'D': from_table = _cyr_cp866; break;
case 'I': from_table = _cyr_iso88595; break;
case 'M': from_table = _cyr_mac; break;
case 'K':
break;
default:
throw_invalid_argument("Unknown source charset: %c", from);
break;
}
switch (toupper((int)(unsigned char)to)) {
case 'W': to_table = _cyr_win1251; break;
case 'A':
case 'D': to_table = _cyr_cp866; break;
case 'I': to_table = _cyr_iso88595; break;
case 'M': to_table = _cyr_mac; break;
case 'K':
break;
default:
throw_invalid_argument("Unknown destination charset: %c", to);
break;
}
for (int i = 0; i < input.size(); i++) {
tmp = from_table == nullptr ? uinput[i] : from_table[uinput[i]];
str[i] = to_table == nullptr ? tmp : to_table[tmp + 256];
}
retString.setSize(input.size());
return retString;
}
///////////////////////////////////////////////////////////////////////////////
// Hebrew
#define HEB_BLOCK_TYPE_ENG 1
#define HEB_BLOCK_TYPE_HEB 2
#define isheb(c) \
(((((unsigned char) c) >= 224) && (((unsigned char) c) <= 250)) ? 1 : 0)
#define _isblank(c) \
(((((unsigned char) c) == ' ' || ((unsigned char) c) == '\t')) ? 1 : 0)
#define _isnewline(c) \
(((((unsigned char) c) == '\n' || ((unsigned char) c) == '\r')) ? 1 : 0)
/**
* Converts Logical Hebrew text (Hebrew Windows style) to Visual text
* Cheers/complaints/flames - Zeev Suraski <zeev@php.net>
*/
String string_convert_hebrew_string(const String& inStr,
int max_chars_per_line,
int convert_newlines) {
assert(!inStr.empty());
auto str = inStr.data();
auto str_len = inStr.size();
const char *tmp;
char *heb_str, *broken_str;
char *target;
int block_start, block_end, block_type, block_length, i;
long max_chars=0;
int begin, end, char_count, orig_begin;
tmp = str;
block_start=block_end=0;
heb_str = (char *) smart_malloc(str_len + 1);
SCOPE_EXIT { smart_free(heb_str); };
target = heb_str+str_len;
*target = 0;
target--;
block_length=0;
if (isheb(*tmp)) {
block_type = HEB_BLOCK_TYPE_HEB;
} else {
block_type = HEB_BLOCK_TYPE_ENG;
}
do {
if (block_type == HEB_BLOCK_TYPE_HEB) {
while ((isheb((int)*(tmp+1)) ||
_isblank((int)*(tmp+1)) ||
ispunct((int)*(tmp+1)) ||
(int)*(tmp+1)=='\n' ) && block_end<str_len-1) {
tmp++;
block_end++;
block_length++;
}
for (i = block_start; i<= block_end; i++) {
*target = str[i];
switch (*target) {
case '(': *target = ')'; break;
case ')': *target = '('; break;
case '[': *target = ']'; break;
case ']': *target = '['; break;
case '{': *target = '}'; break;
case '}': *target = '{'; break;
case '<': *target = '>'; break;
case '>': *target = '<'; break;
case '\\': *target = '/'; break;
case '/': *target = '\\'; break;
default:
break;
}
target--;
}
block_type = HEB_BLOCK_TYPE_ENG;
} else {
while (!isheb(*(tmp+1)) &&
(int)*(tmp+1)!='\n' && block_end < str_len-1) {
tmp++;
block_end++;
block_length++;
}
while ((_isblank((int)*tmp) ||
ispunct((int)*tmp)) && *tmp!='/' &&
*tmp!='-' && block_end > block_start) {
tmp--;
block_end--;
}
for (i = block_end; i >= block_start; i--) {
*target = str[i];
target--;
}
block_type = HEB_BLOCK_TYPE_HEB;
}
block_start=block_end+1;
} while (block_end < str_len-1);
String brokenStr(str_len, ReserveString);
broken_str = brokenStr.bufferSlice().ptr;
begin=end=str_len-1;
target = broken_str;
while (1) {
char_count=0;
while ((!max_chars || char_count < max_chars) && begin > 0) {
char_count++;
begin--;
if (begin <= 0 || _isnewline(heb_str[begin])) {
while (begin > 0 && _isnewline(heb_str[begin-1])) {
begin--;
char_count++;
}
break;
}
}
if (char_count == max_chars) { /* try to avoid breaking words */
int new_char_count=char_count, new_begin=begin;
while (new_char_count > 0) {
if (_isblank(heb_str[new_begin]) || _isnewline(heb_str[new_begin])) {
break;
}
new_begin++;
new_char_count--;
}
if (new_char_count > 0) {
char_count=new_char_count;
begin=new_begin;
}
}
orig_begin=begin;
if (_isblank(heb_str[begin])) {
heb_str[begin]='\n';
}
while (begin <= end && _isnewline(heb_str[begin])) {
/* skip leading newlines */
begin++;
}
for (i = begin; i <= end; i++) { /* copy content */
*target = heb_str[i];
target++;
}
for (i = orig_begin; i <= end && _isnewline(heb_str[i]); i++) {
*target = heb_str[i];
target++;
}
begin=orig_begin;
if (begin <= 0) {
*target = 0;
break;
}
begin--;
end=begin;
}
if (convert_newlines) {
int count;
auto ret = string_replace(broken_str, str_len, "\n", strlen("\n"),
"<br />\n", strlen("<br />\n"), count, true);
if (!ret.isNull()) {
return ret;
}
}
brokenStr.setSize(str_len);
return brokenStr;
}
#if defined(__APPLE__)
void *memrchr(const void *s, int c, size_t n) {
for (const char *p = (const char *)s + n - 1; p >= s; p--) {
if (*p == c) return (void *)p;
}
return nullptr;
}
#endif
///////////////////////////////////////////////////////////////////////////////
}
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/good_2263_0 |
crossvul-cpp_data_good_3618_0 | /***************************************************************************
copyright : (C) 2002 - 2008 by Scott Wheeler
email : wheeler@kde.org
***************************************************************************/
/***************************************************************************
* This library is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License version *
* 2.1 as published by the Free Software Foundation. *
* *
* This library is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public *
* License along with this library; if not, write to the Free Software *
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA *
* 02110-1301 USA *
* *
* Alternatively, this file is available under the Mozilla Public *
* License Version 1.1. You may obtain a copy of the License at *
* http://www.mozilla.org/MPL/ *
***************************************************************************/
#include <ostream>
#include <tstring.h>
#include <tdebug.h>
#include <string.h>
#include "tbytevector.h"
// This is a bit ugly to keep writing over and over again.
// A rather obscure feature of the C++ spec that I hadn't thought of that makes
// working with C libs much more effecient. There's more here:
//
// http://www.informit.com/isapi/product_id~{9C84DAB4-FE6E-49C5-BB0A-FB50331233EA}/content/index.asp
#define DATA(x) (&(x->data[0]))
namespace TagLib {
static const char hexTable[17] = "0123456789abcdef";
static const uint crcTable[256] = {
0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4,
0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
};
/*!
* A templatized KMP find that works both with a ByteVector and a ByteVectorMirror.
*/
template <class Vector>
int vectorFind(const Vector &v, const Vector &pattern, uint offset, int byteAlign)
{
if(pattern.size() > v.size() || offset > v.size() - 1)
return -1;
// Let's go ahead and special case a pattern of size one since that's common
// and easy to make fast.
if(pattern.size() == 1) {
char p = pattern[0];
for(uint i = offset; i < v.size(); i++) {
if(v[i] == p && (i - offset) % byteAlign == 0)
return i;
}
return -1;
}
uchar lastOccurrence[256];
for(uint i = 0; i < 256; ++i)
lastOccurrence[i] = uchar(pattern.size());
for(uint i = 0; i < pattern.size() - 1; ++i)
lastOccurrence[uchar(pattern[i])] = uchar(pattern.size() - i - 1);
for(uint i = pattern.size() - 1 + offset; i < v.size(); i += lastOccurrence[uchar(v.at(i))]) {
int iBuffer = i;
int iPattern = pattern.size() - 1;
while(iPattern >= 0 && v.at(iBuffer) == pattern[iPattern]) {
--iBuffer;
--iPattern;
}
if(-1 == iPattern && (iBuffer + 1 - offset) % byteAlign == 0)
return iBuffer + 1;
}
return -1;
}
/*!
* Wraps the accessors to a ByteVector to make the search algorithm access the
* elements in reverse.
*
* \see vectorFind()
* \see ByteVector::rfind()
*/
class ByteVectorMirror
{
public:
ByteVectorMirror(const ByteVector &source) : v(source) {}
char operator[](int index) const
{
return v[v.size() - index - 1];
}
char at(int index) const
{
return v.at(v.size() - index - 1);
}
ByteVectorMirror mid(uint index, uint length = 0xffffffff) const
{
return length == 0xffffffff ? v.mid(0, index) : v.mid(index - length, length);
}
uint size() const
{
return v.size();
}
int find(const ByteVectorMirror &pattern, uint offset = 0, int byteAlign = 1) const
{
ByteVectorMirror v(*this);
if(offset > 0) {
offset = size() - offset - pattern.size();
if(offset >= size())
offset = 0;
}
const int pos = vectorFind<ByteVectorMirror>(v, pattern, offset, byteAlign);
// If the offset is zero then we need to adjust the location in the search
// to be appropriately reversed. If not we need to account for the fact
// that the recursive call (called from the above line) has already ajusted
// for this but that the normal templatized find above will add the offset
// to the returned value.
//
// This is a little confusing at first if you don't first stop to think
// through the logic involved in the forward search.
if(pos == -1)
return -1;
return size() - pos - pattern.size();
}
private:
const ByteVector &v;
};
template <class T>
T toNumber(const std::vector<char> &data, bool mostSignificantByteFirst)
{
T sum = 0;
if(data.size() <= 0) {
debug("ByteVectorMirror::toNumber<T>() -- data is empty, returning 0");
return sum;
}
uint size = sizeof(T);
uint last = data.size() > size ? size - 1 : data.size() - 1;
for(uint i = 0; i <= last; i++)
sum |= (T) uchar(data[i]) << ((mostSignificantByteFirst ? last - i : i) * 8);
return sum;
}
template <class T>
ByteVector fromNumber(T value, bool mostSignificantByteFirst)
{
int size = sizeof(T);
ByteVector v(size, 0);
for(int i = 0; i < size; i++)
v[i] = uchar(value >> ((mostSignificantByteFirst ? size - 1 - i : i) * 8) & 0xff);
return v;
}
}
using namespace TagLib;
class ByteVector::ByteVectorPrivate : public RefCounter
{
public:
ByteVectorPrivate() : RefCounter(), size(0) {}
ByteVectorPrivate(const std::vector<char> &v) : RefCounter(), data(v), size(v.size()) {}
ByteVectorPrivate(TagLib::uint len, char value) : RefCounter(), data(len, value), size(len) {}
std::vector<char> data;
// std::vector<T>::size() is very slow, so we'll cache the value
uint size;
};
////////////////////////////////////////////////////////////////////////////////
// static members
////////////////////////////////////////////////////////////////////////////////
ByteVector ByteVector::null;
ByteVector ByteVector::fromCString(const char *s, uint length)
{
ByteVector v;
if(length == 0xffffffff)
v.setData(s);
else
v.setData(s, length);
return v;
}
ByteVector ByteVector::fromUInt(uint value, bool mostSignificantByteFirst)
{
return fromNumber<uint>(value, mostSignificantByteFirst);
}
ByteVector ByteVector::fromShort(short value, bool mostSignificantByteFirst)
{
return fromNumber<short>(value, mostSignificantByteFirst);
}
ByteVector ByteVector::fromLongLong(long long value, bool mostSignificantByteFirst)
{
return fromNumber<long long>(value, mostSignificantByteFirst);
}
////////////////////////////////////////////////////////////////////////////////
// public members
////////////////////////////////////////////////////////////////////////////////
ByteVector::ByteVector()
{
d = new ByteVectorPrivate;
}
ByteVector::ByteVector(uint size, char value)
{
d = new ByteVectorPrivate(size, value);
}
ByteVector::ByteVector(const ByteVector &v) : d(v.d)
{
d->ref();
}
ByteVector::ByteVector(char c)
{
d = new ByteVectorPrivate;
d->data.push_back(c);
d->size = 1;
}
ByteVector::ByteVector(const char *data, uint length)
{
d = new ByteVectorPrivate;
setData(data, length);
}
ByteVector::ByteVector(const char *data)
{
d = new ByteVectorPrivate;
setData(data);
}
ByteVector::~ByteVector()
{
if(d->deref())
delete d;
}
ByteVector &ByteVector::setData(const char *data, uint length)
{
detach();
resize(length);
if(length > 0)
::memcpy(DATA(d), data, length);
return *this;
}
ByteVector &ByteVector::setData(const char *data)
{
return setData(data, ::strlen(data));
}
char *ByteVector::data()
{
detach();
return size() > 0 ? DATA(d) : 0;
}
const char *ByteVector::data() const
{
return size() > 0 ? DATA(d) : 0;
}
ByteVector ByteVector::mid(uint index, uint length) const
{
ByteVector v;
if(index > size())
return v;
ConstIterator endIt;
if(length < size() - index)
endIt = d->data.begin() + index + length;
else
endIt = d->data.end();
v.d->data.insert(v.d->data.begin(), ConstIterator(d->data.begin() + index), endIt);
v.d->size = v.d->data.size();
return v;
}
char ByteVector::at(uint index) const
{
return index < size() ? d->data[index] : 0;
}
int ByteVector::find(const ByteVector &pattern, uint offset, int byteAlign) const
{
return vectorFind<ByteVector>(*this, pattern, offset, byteAlign);
}
int ByteVector::rfind(const ByteVector &pattern, uint offset, int byteAlign) const
{
// Ok, this is a little goofy, but pretty cool after it sinks in. Instead of
// reversing the find method's Boyer-Moore search algorithm I created a "mirror"
// for a ByteVector to reverse the behavior of the accessors.
ByteVectorMirror v(*this);
ByteVectorMirror p(pattern);
return v.find(p, offset, byteAlign);
}
bool ByteVector::containsAt(const ByteVector &pattern, uint offset, uint patternOffset, uint patternLength) const
{
if(pattern.size() < patternLength)
patternLength = pattern.size();
// do some sanity checking -- all of these things are needed for the search to be valid
if(patternLength > size() || offset >= size() || patternOffset >= pattern.size() || patternLength == 0)
return false;
// loop through looking for a mismatch
for(uint i = 0; i < patternLength - patternOffset; i++) {
if(at(i + offset) != pattern[i + patternOffset])
return false;
}
return true;
}
bool ByteVector::startsWith(const ByteVector &pattern) const
{
return containsAt(pattern, 0);
}
bool ByteVector::endsWith(const ByteVector &pattern) const
{
return containsAt(pattern, size() - pattern.size());
}
ByteVector &ByteVector::replace(const ByteVector &pattern, const ByteVector &with)
{
if(pattern.size() == 0 || pattern.size() > size())
return *this;
const uint withSize = with.size();
const uint patternSize = pattern.size();
int offset = 0;
if(withSize == patternSize) {
// I think this case might be common enough to optimize it
detach();
offset = find(pattern);
while(offset >= 0) {
::memcpy(data() + offset, with.data(), withSize);
offset = find(pattern, offset + withSize);
}
return *this;
}
// calculate new size:
uint newSize = 0;
for(;;) {
int next = find(pattern, offset);
if(next < 0) {
if(offset == 0)
// pattern not found, do nothing:
return *this;
newSize += size() - offset;
break;
}
newSize += (next - offset) + withSize;
offset = next + patternSize;
}
// new private data of appropriate size:
ByteVectorPrivate *newData = new ByteVectorPrivate(newSize, 0);
char *target = DATA(newData);
const char *source = data();
// copy modified data into new private data:
offset = 0;
for(;;) {
int next = find(pattern, offset);
if(next < 0) {
::memcpy(target, source + offset, size() - offset);
break;
}
int chunkSize = next - offset;
::memcpy(target, source + offset, chunkSize);
target += chunkSize;
::memcpy(target, with.data(), withSize);
target += withSize;
offset += chunkSize + patternSize;
}
// replace private data:
if(d->deref())
delete d;
d = newData;
return *this;
}
int ByteVector::endsWithPartialMatch(const ByteVector &pattern) const
{
if(pattern.size() > size())
return -1;
const int startIndex = size() - pattern.size();
// try to match the last n-1 bytes from the vector (where n is the pattern
// size) -- continue trying to match n-2, n-3...1 bytes
for(uint i = 1; i < pattern.size(); i++) {
if(containsAt(pattern, startIndex + i, 0, pattern.size() - i))
return startIndex + i;
}
return -1;
}
ByteVector &ByteVector::append(const ByteVector &v)
{
if(v.d->size == 0)
return *this; // Simply return if appending nothing.
detach();
uint originalSize = d->size;
resize(d->size + v.d->size);
::memcpy(DATA(d) + originalSize, DATA(v.d), v.size());
return *this;
}
ByteVector &ByteVector::clear()
{
detach();
d->data.clear();
d->size = 0;
return *this;
}
TagLib::uint ByteVector::size() const
{
return d->size;
}
ByteVector &ByteVector::resize(uint size, char padding)
{
if(d->size < size) {
d->data.reserve(size);
d->data.insert(d->data.end(), size - d->size, padding);
}
else
d->data.erase(d->data.begin() + size, d->data.end());
d->size = size;
return *this;
}
ByteVector::Iterator ByteVector::begin()
{
return d->data.begin();
}
ByteVector::ConstIterator ByteVector::begin() const
{
return d->data.begin();
}
ByteVector::Iterator ByteVector::end()
{
return d->data.end();
}
ByteVector::ConstIterator ByteVector::end() const
{
return d->data.end();
}
bool ByteVector::isNull() const
{
return d == null.d;
}
bool ByteVector::isEmpty() const
{
return d->data.size() == 0;
}
TagLib::uint ByteVector::checksum() const
{
uint sum = 0;
for(ByteVector::ConstIterator it = begin(); it != end(); ++it)
sum = (sum << 8) ^ crcTable[((sum >> 24) & 0xff) ^ uchar(*it)];
return sum;
}
TagLib::uint ByteVector::toUInt(bool mostSignificantByteFirst) const
{
return toNumber<uint>(d->data, mostSignificantByteFirst);
}
short ByteVector::toShort(bool mostSignificantByteFirst) const
{
return toNumber<unsigned short>(d->data, mostSignificantByteFirst);
}
unsigned short ByteVector::toUShort(bool mostSignificantByteFirst) const
{
return toNumber<unsigned short>(d->data, mostSignificantByteFirst);
}
long long ByteVector::toLongLong(bool mostSignificantByteFirst) const
{
return toNumber<unsigned long long>(d->data, mostSignificantByteFirst);
}
const char &ByteVector::operator[](int index) const
{
return d->data[index];
}
char &ByteVector::operator[](int index)
{
detach();
return d->data[index];
}
bool ByteVector::operator==(const ByteVector &v) const
{
if(d->size != v.d->size)
return false;
return ::memcmp(data(), v.data(), size()) == 0;
}
bool ByteVector::operator!=(const ByteVector &v) const
{
return !operator==(v);
}
bool ByteVector::operator==(const char *s) const
{
if(d->size != ::strlen(s))
return false;
return ::memcmp(data(), s, d->size) == 0;
}
bool ByteVector::operator!=(const char *s) const
{
return !operator==(s);
}
bool ByteVector::operator<(const ByteVector &v) const
{
int result = ::memcmp(data(), v.data(), d->size < v.d->size ? d->size : v.d->size);
if(result != 0)
return result < 0;
else
return size() < v.size();
}
bool ByteVector::operator>(const ByteVector &v) const
{
return v < *this;
}
ByteVector ByteVector::operator+(const ByteVector &v) const
{
ByteVector sum(*this);
sum.append(v);
return sum;
}
ByteVector &ByteVector::operator=(const ByteVector &v)
{
if(&v == this)
return *this;
if(d->deref())
delete d;
d = v.d;
d->ref();
return *this;
}
ByteVector &ByteVector::operator=(char c)
{
*this = ByteVector(c);
return *this;
}
ByteVector &ByteVector::operator=(const char *data)
{
*this = ByteVector(data);
return *this;
}
ByteVector ByteVector::toHex() const
{
ByteVector encoded(size() * 2);
uint j = 0;
for(uint i = 0; i < size(); i++) {
unsigned char c = d->data[i];
encoded[j++] = hexTable[(c >> 4) & 0x0F];
encoded[j++] = hexTable[(c ) & 0x0F];
}
return encoded;
}
////////////////////////////////////////////////////////////////////////////////
// protected members
////////////////////////////////////////////////////////////////////////////////
void ByteVector::detach()
{
if(d->count() > 1) {
d->deref();
d = new ByteVectorPrivate(d->data);
}
}
////////////////////////////////////////////////////////////////////////////////
// related functions
////////////////////////////////////////////////////////////////////////////////
std::ostream &operator<<(std::ostream &s, const ByteVector &v)
{
for(TagLib::uint i = 0; i < v.size(); i++)
s << v[i];
return s;
}
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/good_3618_0 |
crossvul-cpp_data_bad_1605_4 | /*
Copyright 2008-2013 LibRaw LLC (info@libraw.org)
LibRaw is free software; you can redistribute it and/or modify
it under the terms of the one of three licenses as you choose:
1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1
(See file LICENSE.LGPL provided in LibRaw distribution archive for details).
2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
(See file LICENSE.CDDL provided in LibRaw distribution archive for details).
3. LibRaw Software License 27032010
(See file LICENSE.LibRaw.pdf provided in LibRaw distribution archive for details).
This file is generated from Dave Coffin's dcraw.c
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2010 by Dave Coffin, dcoffin a cybercom o net
Look into dcraw homepage (probably http://cybercom.net/~dcoffin/dcraw/)
for more information
*/
#line 4090 "dcraw/dcraw.c"
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
#line 4101 "dcraw/dcraw.c"
/*
Seach from the current directory up to the root looking for
a ".badpixels" file, and fix those pixels now.
*/
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
char *cp, line[128];
int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
int fixed = 0;
#endif
#endif
if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
if (cfname)
fp = fopen (cfname, "r");
#line 4151 "dcraw/dcraw.c"
if (!fp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
return;
}
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fcol(r,c) == fcol(row,col)) {
tot += BAYER2(r,c);
n++;
}
BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
#endif
}
#ifdef DCRAW_VERBOSE
if (fixed) fputc ('\n', stderr);
#endif
fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}
void CLASS subtract (const char *fname)
{
FILE *fp;
int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif
if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
return;
}
if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
if (c == '#') comment = 1;
if (c == '\n') comment = 0;
if (comment) continue;
if (isdigit(c)) number = 1;
if (number) {
if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
else if (isspace(c)) {
number = 0; nd++;
} else error = 1;
}
}
if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
fclose (fp); return;
} else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
fclose (fp); return;
}
pixel = (ushort *) calloc (width, sizeof *pixel);
merror (pixel, "subtract()");
for (row=0; row < height; row++) {
fread (pixel, 2, width, fp);
for (col=0; col < width; col++)
BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
}
free (pixel);
fclose (fp);
memset (cblack, 0, sizeof cblack);
black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2);
#endif
}
#line 10213 "dcraw/dcraw.c"
#ifndef NO_LCMS
void CLASS apply_profile (const char *input, const char *output)
{
char *prof;
cmsHPROFILE hInProfile=0, hOutProfile=0;
cmsHTRANSFORM hTransform;
FILE *fp;
unsigned size;
#ifndef USE_LCMS2
cmsErrorAction (LCMS_ERROR_SHOW);
#endif
if (strcmp (input, "embed"))
hInProfile = cmsOpenProfileFromFile (input, "r");
else if (profile_length) {
#ifndef LIBRAW_LIBRARY_BUILD
prof = (char *) malloc (profile_length);
merror (prof, "apply_profile()");
fseek (ifp, profile_offset, SEEK_SET);
fread (prof, 1, profile_length, ifp);
hInProfile = cmsOpenProfileFromMem (prof, profile_length);
free (prof);
#else
hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length);
#endif
} else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
#endif
}
if (!hInProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
return;
}
if (!output)
hOutProfile = cmsCreate_sRGBProfile();
else if ((fp = fopen (output, "rb"))) {
fread (&size, 4, 1, fp);
fseek (fp, 0, SEEK_SET);
oprof = (unsigned *) malloc (size = ntohl(size));
merror (oprof, "apply_profile()");
fread (oprof, 1, size, fp);
fclose (fp);
if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
free (oprof);
oprof = 0;
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("Cannot open file %s!\n"), output);
#endif
if (!hOutProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
goto quit;
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2);
#endif
hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
cmsDoTransform (hTransform, image, image, width*height);
raw_color = 1; /* Don't use rgb_cam with a profile */
cmsDeleteTransform (hTransform);
cmsCloseProfile (hOutProfile);
quit:
cmsCloseProfile (hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2);
#endif
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/bad_1605_4 |
crossvul-cpp_data_good_1605_4 | /*
Copyright 2008-2013 LibRaw LLC (info@libraw.org)
LibRaw is free software; you can redistribute it and/or modify
it under the terms of the one of three licenses as you choose:
1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1
(See file LICENSE.LGPL provided in LibRaw distribution archive for details).
2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
(See file LICENSE.CDDL provided in LibRaw distribution archive for details).
3. LibRaw Software License 27032010
(See file LICENSE.LibRaw.pdf provided in LibRaw distribution archive for details).
This file is generated from Dave Coffin's dcraw.c
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2010 by Dave Coffin, dcoffin a cybercom o net
Look into dcraw homepage (probably http://cybercom.net/~dcoffin/dcraw/)
for more information
*/
#line 4091 "dcraw/dcraw.c"
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
#line 4102 "dcraw/dcraw.c"
/*
Seach from the current directory up to the root looking for
a ".badpixels" file, and fix those pixels now.
*/
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
char *cp, line[128];
int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
int fixed = 0;
#endif
#endif
if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
if (cfname)
fp = fopen (cfname, "r");
#line 4152 "dcraw/dcraw.c"
if (!fp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
return;
}
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fcol(r,c) == fcol(row,col)) {
tot += BAYER2(r,c);
n++;
}
BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
#endif
}
#ifdef DCRAW_VERBOSE
if (fixed) fputc ('\n', stderr);
#endif
fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}
void CLASS subtract (const char *fname)
{
FILE *fp;
int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif
if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
return;
}
if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
if (c == '#') comment = 1;
if (c == '\n') comment = 0;
if (comment) continue;
if (isdigit(c)) number = 1;
if (number) {
if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
else if (isspace(c)) {
number = 0; nd++;
} else error = 1;
}
}
if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
fclose (fp); return;
} else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
fclose (fp); return;
}
pixel = (ushort *) calloc (width, sizeof *pixel);
merror (pixel, "subtract()");
for (row=0; row < height; row++) {
fread (pixel, 2, width, fp);
for (col=0; col < width; col++)
BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
}
free (pixel);
fclose (fp);
memset (cblack, 0, sizeof cblack);
black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2);
#endif
}
#line 10214 "dcraw/dcraw.c"
#ifndef NO_LCMS
void CLASS apply_profile (const char *input, const char *output)
{
char *prof;
cmsHPROFILE hInProfile=0, hOutProfile=0;
cmsHTRANSFORM hTransform;
FILE *fp;
unsigned size;
#ifndef USE_LCMS2
cmsErrorAction (LCMS_ERROR_SHOW);
#endif
if (strcmp (input, "embed"))
hInProfile = cmsOpenProfileFromFile (input, "r");
else if (profile_length) {
#ifndef LIBRAW_LIBRARY_BUILD
prof = (char *) malloc (profile_length);
merror (prof, "apply_profile()");
fseek (ifp, profile_offset, SEEK_SET);
fread (prof, 1, profile_length, ifp);
hInProfile = cmsOpenProfileFromMem (prof, profile_length);
free (prof);
#else
hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length);
#endif
} else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
#endif
}
if (!hInProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
return;
}
if (!output)
hOutProfile = cmsCreate_sRGBProfile();
else if ((fp = fopen (output, "rb"))) {
fread (&size, 4, 1, fp);
fseek (fp, 0, SEEK_SET);
oprof = (unsigned *) malloc (size = ntohl(size));
merror (oprof, "apply_profile()");
fread (oprof, 1, size, fp);
fclose (fp);
if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
free (oprof);
oprof = 0;
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("Cannot open file %s!\n"), output);
#endif
if (!hOutProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
goto quit;
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2);
#endif
hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
cmsDoTransform (hTransform, image, image, width*height);
raw_color = 1; /* Don't use rgb_cam with a profile */
cmsDeleteTransform (hTransform);
cmsCloseProfile (hOutProfile);
quit:
cmsCloseProfile (hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2);
#endif
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/good_1605_4 |
crossvul-cpp_data_bad_2263_0 | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
| Copyright (c) 1998-2010 Zend Technologies Ltd. (http://www.zend.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 2.00 of the Zend license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.zend.com/license/2_00.txt. |
| If you did not receive a copy of the Zend license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@zend.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/zend-string.h"
#include "hphp/runtime/base/zend-printf.h"
#include "hphp/runtime/base/zend-math.h"
#include "hphp/util/lock.h"
#include "hphp/util/overflow.h"
#include <math.h>
#include <monetary.h>
#include "hphp/runtime/base/bstring.h"
#include "hphp/runtime/base/exceptions.h"
#include "hphp/runtime/base/complex-types.h"
#include "hphp/runtime/base/string-buffer.h"
#include "hphp/runtime/base/runtime-error.h"
#include "hphp/runtime/base/type-conversions.h"
#include "hphp/runtime/base/string-util.h"
#include "hphp/runtime/base/builtin-functions.h"
#ifdef __APPLE__
#ifndef isnan
#define isnan(x) \
( sizeof (x) == sizeof(float ) ? __inline_isnanf((float)(x)) \
: sizeof (x) == sizeof(double) ? __inline_isnand((double)(x)) \
: __inline_isnan ((long double)(x)))
#endif
#ifndef isinf
#define isinf(x) \
( sizeof (x) == sizeof(float ) ? __inline_isinff((float)(x)) \
: sizeof (x) == sizeof(double) ? __inline_isinfd((double)(x)) \
: __inline_isinf ((long double)(x)))
#endif
#endif
#define PHP_QPRINT_MAXL 75
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
// helpers
bool string_substr_check(int len, int &f, int &l) {
if (l < 0 && -l > len) {
return false;
} else if (l > len) {
l = len;
}
if (f > len) {
return false;
} else if (f < 0 && -f > len) {
f = 0;
}
if (l < 0 && (l + len - f) < 0) {
return false;
}
// if "from" position is negative, count start position from the end
if (f < 0) {
f += len;
if (f < 0) {
f = 0;
}
}
if (f >= len) {
return false;
}
// if "length" position is negative, set it to the length
// needed to stop that many chars from the end of the string
if (l < 0) {
l += len - f;
if (l < 0) {
l = 0;
}
}
if ((unsigned int)f + (unsigned int)l > (unsigned int)len) {
l = len - f;
}
return true;
}
void string_charmask(const char *sinput, int len, char *mask) {
const unsigned char *input = (unsigned char *)sinput;
const unsigned char *end;
unsigned char c;
memset(mask, 0, 256);
for (end = input+len; input < end; input++) {
c=*input;
if ((input+3 < end) && input[1] == '.' && input[2] == '.'
&& input[3] >= c) {
memset(mask+c, 1, input[3] - c + 1);
input+=3;
} else if ((input+1 < end) && input[0] == '.' && input[1] == '.') {
/* Error, try to be as helpful as possible:
(a range ending/starting with '.' won't be captured here) */
if (end-len >= input) { /* there was no 'left' char */
throw_invalid_argument
("charlist: Invalid '..'-range, missing left of '..'");
continue;
}
if (input+2 >= end) { /* there is no 'right' char */
throw_invalid_argument
("charlist: Invalid '..'-range, missing right of '..'");
continue;
}
if (input[-1] > input[2]) { /* wrong order */
throw_invalid_argument
("charlist: '..'-range needs to be incrementing");
continue;
}
/* FIXME: better error (a..b..c is the only left possibility?) */
throw_invalid_argument("charlist: Invalid '..'-range");
continue;
} else {
mask[c]=1;
}
}
}
int string_copy(char *dst, const char *src, int siz) {
register char *d = dst;
register const char *s = src;
register size_t n = siz;
/* Copy as many bytes as will fit */
if (n != 0 && --n != 0) {
do {
if ((*d++ = *s++) == 0)
break;
} while (--n != 0);
}
/* Not enough room in dst, add NUL and traverse rest of src */
if (n == 0) {
if (siz != 0)
*d = '\0'; /* NUL-terminate dst */
while (*s++)
;
}
return(s - src - 1); /* count does not include NUL */
}
///////////////////////////////////////////////////////////////////////////////
// comparisons
int string_ncmp(const char *s1, const char *s2, int len) {
for (int i = 0; i < len; i++) {
char c1 = s1[i];
char c2 = s2[i];
if (c1 > c2) return 1;
if (c1 < c2) return -1;
}
return 0;
}
static int compare_right(char const **a, char const *aend,
char const **b, char const *bend) {
int bias = 0;
/* The longest run of digits wins. That aside, the greatest
value wins, but we can't know that it will until we've scanned
both numbers to know that they have the same magnitude, so we
remember it in BIAS. */
for(;; (*a)++, (*b)++) {
if ((*a == aend || !isdigit((int)(unsigned char)**a)) &&
(*b == bend || !isdigit((int)(unsigned char)**b)))
return bias;
else if (*a == aend || !isdigit((int)(unsigned char)**a))
return -1;
else if (*b == bend || !isdigit((int)(unsigned char)**b))
return +1;
else if (**a < **b) {
if (!bias)
bias = -1;
} else if (**a > **b) {
if (!bias)
bias = +1;
}
}
return 0;
}
static int compare_left(char const **a, char const *aend,
char const **b, char const *bend) {
/* Compare two left-aligned numbers: the first to have a
different value wins. */
for(;; (*a)++, (*b)++) {
if ((*a == aend || !isdigit((int)(unsigned char)**a)) &&
(*b == bend || !isdigit((int)(unsigned char)**b)))
return 0;
else if (*a == aend || !isdigit((int)(unsigned char)**a))
return -1;
else if (*b == bend || !isdigit((int)(unsigned char)**b))
return +1;
else if (**a < **b)
return -1;
else if (**a > **b)
return +1;
}
return 0;
}
int string_natural_cmp(char const *a, size_t a_len,
char const *b, size_t b_len, int fold_case) {
char ca, cb;
char const *ap, *bp;
char const *aend = a + a_len, *bend = b + b_len;
int fractional, result;
if (a_len == 0 || b_len == 0)
return a_len - b_len;
ap = a;
bp = b;
while (1) {
ca = *ap; cb = *bp;
/* skip over leading spaces or zeros */
while (isspace((int)(unsigned char)ca))
ca = *++ap;
while (isspace((int)(unsigned char)cb))
cb = *++bp;
/* process run of digits */
if (isdigit((int)(unsigned char)ca) && isdigit((int)(unsigned char)cb)) {
fractional = (ca == '0' || cb == '0');
if (fractional)
result = compare_left(&ap, aend, &bp, bend);
else
result = compare_right(&ap, aend, &bp, bend);
if (result != 0)
return result;
else if (ap == aend && bp == bend)
/* End of the strings. Let caller sort them out. */
return 0;
else {
/* Keep on comparing from the current point. */
ca = *ap; cb = *bp;
}
}
if (fold_case) {
ca = toupper((int)(unsigned char)ca);
cb = toupper((int)(unsigned char)cb);
}
if (ca < cb)
return -1;
else if (ca > cb)
return +1;
++ap; ++bp;
if (ap >= aend && bp >= bend)
/* The strings compare the same. Perhaps the caller
will want to call strcmp to break the tie. */
return 0;
else if (ap >= aend)
return -1;
else if (bp >= bend)
return 1;
}
}
///////////////////////////////////////////////////////////////////////////////
void string_to_case(String& s, int (*tocase)(int)) {
assert(!s.isNull());
assert(tocase);
auto data = s.bufferSlice().ptr;
auto len = s.size();
for (int i = 0; i < len; i++) {
data[i] = tocase(data[i]);
}
}
///////////////////////////////////////////////////////////////////////////////
#define STR_PAD_LEFT 0
#define STR_PAD_RIGHT 1
#define STR_PAD_BOTH 2
String string_pad(const char *input, int len, int pad_length,
const char *pad_string, int pad_str_len,
int pad_type) {
assert(input);
int num_pad_chars = pad_length - len;
/* If resulting string turns out to be shorter than input string,
we simply copy the input and return. */
if (pad_length < 0 || num_pad_chars < 0) {
return String(input, len, CopyString);
}
/* Setup the padding string values if specified. */
if (pad_str_len == 0) {
throw_invalid_argument("pad_string: (empty)");
return String();
}
String ret(pad_length, ReserveString);
char *result = ret.bufferSlice().ptr;
/* We need to figure out the left/right padding lengths. */
int left_pad, right_pad;
switch (pad_type) {
case STR_PAD_RIGHT:
left_pad = 0;
right_pad = num_pad_chars;
break;
case STR_PAD_LEFT:
left_pad = num_pad_chars;
right_pad = 0;
break;
case STR_PAD_BOTH:
left_pad = num_pad_chars / 2;
right_pad = num_pad_chars - left_pad;
break;
default:
throw_invalid_argument("pad_type: %d", pad_type);
return String();
}
/* First we pad on the left. */
int result_len = 0;
for (int i = 0; i < left_pad; i++) {
result[result_len++] = pad_string[i % pad_str_len];
}
/* Then we copy the input string. */
memcpy(result + result_len, input, len);
result_len += len;
/* Finally, we pad on the right. */
for (int i = 0; i < right_pad; i++) {
result[result_len++] = pad_string[i % pad_str_len];
}
ret.setSize(result_len);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
int string_find(const char *input, int len, char ch, int pos,
bool case_sensitive) {
assert(input);
if (pos < 0 || pos > len) {
return -1;
}
const void *ptr;
if (case_sensitive) {
ptr = memchr(input + pos, ch, len - pos);
} else {
ptr = bstrcasechr(input + pos, ch, len - pos);
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
int string_rfind(const char *input, int len, char ch, int pos,
bool case_sensitive) {
assert(input);
if (pos < -len || pos > len) {
return -1;
}
const void *ptr;
if (case_sensitive) {
if (pos >= 0) {
ptr = memrchr(input + pos, ch, len - pos);
} else {
ptr = memrchr(input, ch, len + pos + 1);
}
} else {
if (pos >= 0) {
ptr = bstrrcasechr(input + pos, ch, len - pos);
} else {
ptr = bstrrcasechr(input, ch, len + pos + 1);
}
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
int string_find(const char *input, int len, const char *s, int s_len,
int pos, bool case_sensitive) {
assert(input);
assert(s);
if (!s_len || pos < 0 || pos > len) {
return -1;
}
void *ptr;
if (case_sensitive) {
ptr = (void*)string_memnstr(input + pos, s, s_len, input + len);
} else {
ptr = bstrcasestr(input + pos, len - pos, s, s_len);
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
int string_rfind(const char *input, int len, const char *s, int s_len,
int pos, bool case_sensitive) {
assert(input);
assert(s);
if (!s_len || pos < -len || pos > len) {
return -1;
}
void *ptr;
if (case_sensitive) {
if (pos >= 0) {
ptr = bstrrstr(input + pos, len - pos, s, s_len);
} else {
ptr = bstrrstr(input, len + pos + s_len, s, s_len);
}
} else {
if (pos >= 0) {
ptr = bstrrcasestr(input + pos, len - pos, s, s_len);
} else {
ptr = bstrrcasestr(input, len + pos + s_len, s, s_len);
}
}
if (ptr != nullptr) {
return (int)((const char *)ptr - input);
}
return -1;
}
const char *string_memnstr(const char *haystack, const char *needle,
int needle_len, const char *end) {
const char *p = haystack;
char ne = needle[needle_len-1];
end -= needle_len;
while (p <= end) {
if ((p = (char *)memchr(p, *needle, (end-p+1))) && ne == p[needle_len-1]) {
if (!memcmp(needle, p, needle_len-1)) {
return p;
}
}
if (p == nullptr) {
return nullptr;
}
p++;
}
return nullptr;
}
String string_replace(const char *s, int len, int start, int length,
const char *replacement, int len_repl) {
assert(s);
assert(replacement);
assert(len >= 0);
// if "start" position is negative, count start position from the end
// of the string
if (start < 0) {
start = len + start;
if (start < 0) {
start = 0;
}
}
if (start > len) {
start = len;
}
// if "length" position is negative, set it to the length
// needed to stop that many chars from the end of the string
if (length < 0) {
length = (len - start) + length;
if (length < 0) {
length = 0;
}
}
// check if length is too large
if (length > len) {
length = len;
}
// check if the length is too large adjusting for non-zero start
// Write this way instead of start + length > len to avoid overflow
if (length > len - start) {
length = len - start;
}
String retString(len + len_repl - length, ReserveString);
char *ret = retString.bufferSlice().ptr;
int ret_len = 0;
if (start) {
memcpy(ret, s, start);
ret_len += start;
}
if (len_repl) {
memcpy(ret + ret_len, replacement, len_repl);
ret_len += len_repl;
}
len -= (start + length);
if (len) {
memcpy(ret + ret_len, s + start + length, len);
ret_len += len;
}
retString.setSize(ret_len);
return retString;
}
String string_replace(const char *input, int len,
const char *search, int len_search,
const char *replacement, int len_replace,
int &count, bool case_sensitive) {
assert(input);
assert(search && len_search);
assert(len >= 0);
assert(len_search >= 0);
assert(len_replace >= 0);
if (len == 0) {
return String();
}
smart::vector<int> founds;
founds.reserve(16);
if (len_search == 1) {
for (int pos = string_find(input, len, *search, 0, case_sensitive);
pos >= 0;
pos = string_find(input, len, *search, pos + len_search,
case_sensitive)) {
founds.push_back(pos);
}
} else {
for (int pos = string_find(input, len, search, len_search, 0,
case_sensitive);
pos >= 0;
pos = string_find(input, len, search, len_search,
pos + len_search, case_sensitive)) {
founds.push_back(pos);
}
}
count = founds.size();
if (count == 0) {
return String(); // not found
}
int reserve;
// Make sure the new size of the string wouldn't overflow int32_t. Don't
// bother if the replacement wouldn't make the string longer.
if (len_replace > len_search) {
auto raise = [&] { raise_error("String too large"); };
if (mul_overflow(len_replace - len_search, count)) {
raise();
}
int diff = (len_replace - len_search) * count;
if (add_overflow(len, diff)) {
raise();
}
reserve = len + diff;
} else {
reserve = len + (len_replace - len_search) * count;
}
String retString(reserve, ReserveString);
char *ret = retString.bufferSlice().ptr;
char *p = ret;
int pos = 0; // last position in input that hasn't been copied over yet
int n;
for (unsigned int i = 0; i < founds.size(); i++) {
n = founds[i];
if (n > pos) {
n -= pos;
memcpy(p, input, n);
p += n;
input += n;
pos += n;
}
if (len_replace) {
memcpy(p, replacement, len_replace);
p += len_replace;
}
input += len_search;
pos += len_search;
}
n = len;
if (n > pos) {
n -= pos;
memcpy(p, input, n);
p += n;
}
retString.setSize(p - ret);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
String string_chunk_split(const char *src, int srclen, const char *end,
int endlen, int chunklen) {
int chunks = srclen / chunklen; // complete chunks!
int restlen = srclen - chunks * chunklen; /* srclen % chunklen */
int out_len = (chunks + 1) * endlen + srclen;
String ret(out_len, ReserveString);
char *dest = ret.bufferSlice().ptr;
const char *p; char *q;
const char *pMax = src + srclen - chunklen + 1;
for (p = src, q = dest; p < pMax; ) {
memcpy(q, p, chunklen);
q += chunklen;
memcpy(q, end, endlen);
q += endlen;
p += chunklen;
}
if (restlen) {
memcpy(q, p, restlen);
q += restlen;
memcpy(q, end, endlen);
q += endlen;
}
ret.setSize(q - dest);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
#define PHP_TAG_BUF_SIZE 1023
/**
* Check if tag is in a set of tags
*
* states:
*
* 0 start tag
* 1 first non-whitespace char seen
*/
static int string_tag_find(const char *tag, int len, const char *set) {
char c, *n;
const char *t;
int state=0, done=0;
char *norm;
if (len <= 0) {
return 0;
}
norm = (char *)smart_malloc(len+1);
n = norm;
t = tag;
c = tolower(*t);
/*
normalize the tag removing leading and trailing whitespace
and turn any <a whatever...> into just <a> and any </tag>
into <tag>
*/
while (!done) {
switch (c) {
case '<':
*(n++) = c;
break;
case '>':
done =1;
break;
default:
if (!isspace((int)c)) {
if (state == 0) {
state=1;
}
if (c != '/') {
*(n++) = c;
}
} else {
if (state == 1)
done=1;
}
break;
}
c = tolower(*(++t));
}
*(n++) = '>';
*n = '\0';
if (strstr(set, norm)) {
done=1;
} else {
done=0;
}
smart_free(norm);
return done;
}
/**
* A simple little state-machine to strip out html and php tags
*
* State 0 is the output state, State 1 means we are inside a
* normal html tag and state 2 means we are inside a php tag.
*
* The state variable is passed in to allow a function like fgetss
* to maintain state across calls to the function.
*
* lc holds the last significant character read and br is a bracket
* counter.
*
* When an allow string is passed in we keep track of the string
* in state 1 and when the tag is closed check it against the
* allow string to see if we should allow it.
* swm: Added ability to strip <?xml tags without assuming it PHP
* code.
*/
String string_strip_tags(const char *s, const int len,
const char *allow, const int allow_len,
bool allow_tag_spaces) {
const char *abuf, *p;
char *rbuf, *tbuf, *tp, *rp, c, lc;
int br, i=0, depth=0, in_q = 0;
int state = 0, pos;
assert(s);
assert(allow);
String retString(s, len, CopyString);
rbuf = retString.bufferSlice().ptr;
String allowString;
c = *s;
lc = '\0';
p = s;
rp = rbuf;
br = 0;
if (allow_len) {
assert(allow);
allowString = String(allow_len, ReserveString);
char *atmp = allowString.bufferSlice().ptr;
for (const char *tmp = allow; *tmp; tmp++, atmp++) {
*atmp = tolower((int)*(const unsigned char *)tmp);
}
allowString.setSize(allow_len);
abuf = allowString.data();
tbuf = (char *)smart_malloc(PHP_TAG_BUF_SIZE+1);
tp = tbuf;
} else {
abuf = nullptr;
tbuf = tp = nullptr;
}
auto move = [&pos, &tbuf, &tp]() {
if (tp - tbuf >= PHP_TAG_BUF_SIZE) {
pos = tp - tbuf;
tbuf = (char*)smart_realloc(tbuf, (tp - tbuf) + PHP_TAG_BUF_SIZE + 1);
tp = tbuf + pos;
}
};
while (i < len) {
switch (c) {
case '\0':
break;
case '<':
if (isspace(*(p + 1)) && !allow_tag_spaces) {
goto reg_char;
}
if (state == 0) {
lc = '<';
state = 1;
if (allow_len) {
move();
*(tp++) = '<';
}
} else if (state == 1) {
depth++;
}
break;
case '(':
if (state == 2) {
if (lc != '"' && lc != '\'') {
lc = '(';
br++;
}
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
} else if (state == 0) {
*(rp++) = c;
}
break;
case ')':
if (state == 2) {
if (lc != '"' && lc != '\'') {
lc = ')';
br--;
}
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
} else if (state == 0) {
*(rp++) = c;
}
break;
case '>':
if (depth) {
depth--;
break;
}
if (in_q) {
break;
}
switch (state) {
case 1: /* HTML/XML */
lc = '>';
in_q = state = 0;
if (allow_len) {
move();
*(tp++) = '>';
*tp='\0';
if (string_tag_find(tbuf, tp-tbuf, abuf)) {
memcpy(rp, tbuf, tp-tbuf);
rp += tp-tbuf;
}
tp = tbuf;
}
break;
case 2: /* PHP */
if (!br && lc != '\"' && *(p-1) == '?') {
in_q = state = 0;
tp = tbuf;
}
break;
case 3:
in_q = state = 0;
tp = tbuf;
break;
case 4: /* JavaScript/CSS/etc... */
if (p >= s + 2 && *(p-1) == '-' && *(p-2) == '-') {
in_q = state = 0;
tp = tbuf;
}
break;
default:
*(rp++) = c;
break;
}
break;
case '"':
case '\'':
if (state == 4) {
/* Inside <!-- comment --> */
break;
} else if (state == 2 && *(p-1) != '\\') {
if (lc == c) {
lc = '\0';
} else if (lc != '\\') {
lc = c;
}
} else if (state == 0) {
*(rp++) = c;
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
}
if (state && p != s && *(p-1) != '\\' && (!in_q || *p == in_q)) {
if (in_q) {
in_q = 0;
} else {
in_q = *p;
}
}
break;
case '!':
/* JavaScript & Other HTML scripting languages */
if (state == 1 && *(p-1) == '<') {
state = 3;
lc = c;
} else {
if (state == 0) {
*(rp++) = c;
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
}
}
break;
case '-':
if (state == 3 && p >= s + 2 && *(p-1) == '-' && *(p-2) == '!') {
state = 4;
} else {
goto reg_char;
}
break;
case '?':
if (state == 1 && *(p-1) == '<') {
br=0;
state=2;
break;
}
case 'E':
case 'e':
/* !DOCTYPE exception */
if (state==3 && p > s+6
&& tolower(*(p-1)) == 'p'
&& tolower(*(p-2)) == 'y'
&& tolower(*(p-3)) == 't'
&& tolower(*(p-4)) == 'c'
&& tolower(*(p-5)) == 'o'
&& tolower(*(p-6)) == 'd') {
state = 1;
break;
}
/* fall-through */
case 'l':
/* swm: If we encounter '<?xml' then we shouldn't be in
* state == 2 (PHP). Switch back to HTML.
*/
if (state == 2 && p > s+2 && *(p-1) == 'm' && *(p-2) == 'x') {
state = 1;
break;
}
/* fall-through */
default:
reg_char:
if (state == 0) {
*(rp++) = c;
} else if (allow_len && state == 1) {
move();
*(tp++) = c;
}
break;
}
c = *(++p);
i++;
}
if (rp < rbuf + len) {
*rp = '\0';
}
if (allow_len) {
smart_free(tbuf);
}
retString.setSize(rp - rbuf);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
String string_addslashes(const char *str, int length) {
assert(str);
if (length == 0) {
return String();
}
String retString((length << 1) + 1, ReserveString);
char *new_str = retString.bufferSlice().ptr;
const char *source = str;
const char *end = source + length;
char *target = new_str;
while (source < end) {
switch (*source) {
case '\0':
*target++ = '\\';
*target++ = '0';
break;
case '\'':
case '\"':
case '\\':
*target++ = '\\';
/* break is missing *intentionally* */
default:
*target++ = *source;
break;
}
source++;
}
retString.setSize(target - new_str);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
static char string_hex2int(int c) {
if (isdigit(c)) {
return c - '0';
}
if (c >= 'A' && c <= 'F') {
return c - 'A' + 10;
}
if (c >= 'a' && c <= 'f') {
return c - 'a' + 10;
}
return -1;
}
String string_quoted_printable_encode(const char *input, int len) {
size_t length = len;
const unsigned char *str = (unsigned char*)input;
unsigned long lp = 0;
unsigned char c;
char *d, *buffer;
char *hex = "0123456789ABCDEF";
String ret(
safe_address(
3,
length + ((safe_address(3, length, 0)/(PHP_QPRINT_MAXL-9)) + 1),
1),
ReserveString
);
d = buffer = ret.bufferSlice().ptr;
while (length--) {
if (((c = *str++) == '\015') && (*str == '\012') && length > 0) {
*d++ = '\015';
*d++ = *str++;
length--;
lp = 0;
} else {
if (iscntrl (c) || (c == 0x7f) || (c & 0x80) ||
(c == '=') || ((c == ' ') && (*str == '\015'))) {
if ((((lp+= 3) > PHP_QPRINT_MAXL) && (c <= 0x7f))
|| ((c > 0x7f) && (c <= 0xdf) && ((lp + 3) > PHP_QPRINT_MAXL))
|| ((c > 0xdf) && (c <= 0xef) && ((lp + 6) > PHP_QPRINT_MAXL))
|| ((c > 0xef) && (c <= 0xf4) && ((lp + 9) > PHP_QPRINT_MAXL))) {
*d++ = '=';
*d++ = '\015';
*d++ = '\012';
lp = 3;
}
*d++ = '=';
*d++ = hex[c >> 4];
*d++ = hex[c & 0xf];
} else {
if ((++lp) > PHP_QPRINT_MAXL) {
*d++ = '=';
*d++ = '\015';
*d++ = '\012';
lp = 1;
}
*d++ = c;
}
}
}
len = d - buffer;
ret.setSize(len);
return ret;
}
String string_quoted_printable_decode(const char *input, int len, bool is_q) {
assert(input);
if (len == 0) {
return String();
}
int i = 0, j = 0, k;
const char *str_in = input;
String ret(len, ReserveString);
char *str_out = ret.bufferSlice().ptr;
while (i < len && str_in[i]) {
switch (str_in[i]) {
case '=':
if (i + 2 < len && str_in[i + 1] && str_in[i + 2] &&
isxdigit((int) str_in[i + 1]) && isxdigit((int) str_in[i + 2]))
{
str_out[j++] = (string_hex2int((int) str_in[i + 1]) << 4)
+ string_hex2int((int) str_in[i + 2]);
i += 3;
} else /* check for soft line break according to RFC 2045*/ {
k = 1;
while (str_in[i + k] &&
((str_in[i + k] == 32) || (str_in[i + k] == 9))) {
/* Possibly, skip spaces/tabs at the end of line */
k++;
}
if (!str_in[i + k]) {
/* End of line reached */
i += k;
}
else if ((str_in[i + k] == 13) && (str_in[i + k + 1] == 10)) {
/* CRLF */
i += k + 2;
}
else if ((str_in[i + k] == 13) || (str_in[i + k] == 10)) {
/* CR or LF */
i += k + 1;
}
else {
str_out[j++] = str_in[i++];
}
}
break;
case '_':
if (is_q) {
str_out[j++] = ' ';
i++;
} else {
str_out[j++] = str_in[i++];
}
break;
default:
str_out[j++] = str_in[i++];
}
}
ret.setSize(j);
return ret;
}
Variant string_base_to_numeric(const char *s, int len, int base) {
int64_t num = 0;
double fnum = 0;
int mode = 0;
int64_t cutoff;
int cutlim;
assert(string_validate_base(base));
cutoff = LONG_MAX / base;
cutlim = LONG_MAX % base;
for (int i = len; i > 0; i--) {
char c = *s++;
/* might not work for EBCDIC */
if (c >= '0' && c <= '9')
c -= '0';
else if (c >= 'A' && c <= 'Z')
c -= 'A' - 10;
else if (c >= 'a' && c <= 'z')
c -= 'a' - 10;
else
continue;
if (c >= base)
continue;
switch (mode) {
case 0: /* Integer */
if (num < cutoff || (num == cutoff && c <= cutlim)) {
num = num * base + c;
break;
} else {
fnum = num;
mode = 1;
}
/* fall-through */
case 1: /* Float */
fnum = fnum * base + c;
}
}
if (mode == 1) {
return fnum;
}
return num;
}
String string_long_to_base(unsigned long value, int base) {
static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
char buf[(sizeof(unsigned long) << 3) + 1];
char *ptr, *end;
assert(string_validate_base(base));
end = ptr = buf + sizeof(buf) - 1;
do {
*--ptr = digits[value % base];
value /= base;
} while (ptr > buf && value);
return String(ptr, end - ptr, CopyString);
}
String string_numeric_to_base(const Variant& value, int base) {
static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
assert(string_validate_base(base));
if ((!value.isInteger() && !value.isDouble())) {
return empty_string();
}
if (value.isDouble()) {
double fvalue = floor(value.toDouble()); /* floor it just in case */
char *ptr, *end;
char buf[(sizeof(double) << 3) + 1];
/* Don't try to convert +/- infinity */
if (fvalue == HUGE_VAL || fvalue == -HUGE_VAL) {
raise_warning("Number too large");
return empty_string();
}
end = ptr = buf + sizeof(buf) - 1;
do {
*--ptr = digits[(int) fmod(fvalue, base)];
fvalue /= base;
} while (ptr > buf && fabs(fvalue) >= 1);
return String(ptr, end - ptr, CopyString);
}
return string_long_to_base(value.toInt64(), base);
}
///////////////////////////////////////////////////////////////////////////////
// uuencode
#define PHP_UU_ENC(c) \
((c) ? ((c) & 077) + ' ' : '`')
#define PHP_UU_ENC_C2(c) \
PHP_UU_ENC(((*(c) << 4) & 060) | ((*((c) + 1) >> 4) & 017))
#define PHP_UU_ENC_C3(c) \
PHP_UU_ENC(((*(c + 1) << 2) & 074) | ((*((c) + 2) >> 6) & 03))
#define PHP_UU_DEC(c) \
(((c) - ' ') & 077)
String string_uuencode(const char *src, int src_len) {
assert(src);
assert(src_len);
int len = 45;
char *p;
const char *s, *e, *ee;
char *dest;
/* encoded length is ~ 38% greater then the original */
String ret((int)ceil(src_len * 1.38) + 45, ReserveString);
p = dest = ret.bufferSlice().ptr;
s = src;
e = src + src_len;
while ((s + 3) < e) {
ee = s + len;
if (ee > e) {
ee = e;
len = ee - s;
if (len % 3) {
ee = s + (int) (floor(len / 3) * 3);
}
}
*p++ = PHP_UU_ENC(len);
while (s < ee) {
*p++ = PHP_UU_ENC(*s >> 2);
*p++ = PHP_UU_ENC_C2(s);
*p++ = PHP_UU_ENC_C3(s);
*p++ = PHP_UU_ENC(*(s + 2) & 077);
s += 3;
}
if (len == 45) {
*p++ = '\n';
}
}
if (s < e) {
if (len == 45) {
*p++ = PHP_UU_ENC(e - s);
len = 0;
}
*p++ = PHP_UU_ENC(*s >> 2);
*p++ = PHP_UU_ENC_C2(s);
*p++ = ((e - s) > 1) ? PHP_UU_ENC_C3(s) : PHP_UU_ENC('\0');
*p++ = ((e - s) > 2) ? PHP_UU_ENC(*(s + 2) & 077) : PHP_UU_ENC('\0');
}
if (len < 45) {
*p++ = '\n';
}
*p++ = PHP_UU_ENC('\0');
*p++ = '\n';
*p = '\0';
ret.setSize(p - dest);
return ret;
}
String string_uudecode(const char *src, int src_len) {
int total_len = 0;
int len;
const char *s, *e, *ee;
char *p, *dest;
String ret(ceil(src_len * 0.75), ReserveString);
p = dest = ret.bufferSlice().ptr;
s = src;
e = src + src_len;
while (s < e) {
if ((len = PHP_UU_DEC(*s++)) <= 0) {
break;
}
/* sanity check */
if (len > src_len) {
goto err;
}
total_len += len;
ee = s + (len == 45 ? 60 : (int) floor(len * 1.33));
/* sanity check */
if (ee > e) {
goto err;
}
while (s < ee) {
if (s + 4 > e) goto err;
*p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4;
*p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2;
*p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3));
s += 4;
}
if (len < 45) {
break;
}
/* skip \n */
s++;
}
if ((len = total_len > (p - dest))) {
*p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4;
if (len > 1) {
*p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2;
if (len > 2) {
*p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3));
}
}
}
ret.setSize(total_len);
return ret;
err:
return String();
}
///////////////////////////////////////////////////////////////////////////////
// base64
static const char base64_table[] = {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', '\0'
};
static const char base64_pad = '=';
static const short base64_reverse_table[256] = {
-2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -2, -2, -1, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 62, -2, -2, -2, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -2, -2, -2, -2, -2, -2,
-2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -2, -2, -2, -2, -2,
-2, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2
};
static String php_base64_encode(const unsigned char *str, int length) {
const unsigned char *current = str;
unsigned char *p;
unsigned char *result;
if ((length + 2) < 0 || ((length + 2) / 3) >= (1 << (sizeof(int) * 8 - 2))) {
return String();
}
String ret(((length + 2) / 3) * 4, ReserveString);
p = result = (unsigned char *)ret.bufferSlice().ptr;
while (length > 2) { /* keep going until we have less than 24 bits */
*p++ = base64_table[current[0] >> 2];
*p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)];
*p++ = base64_table[((current[1] & 0x0f) << 2) + (current[2] >> 6)];
*p++ = base64_table[current[2] & 0x3f];
current += 3;
length -= 3; /* we just handle 3 octets of data */
}
/* now deal with the tail end of things */
if (length != 0) {
*p++ = base64_table[current[0] >> 2];
if (length > 1) {
*p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)];
*p++ = base64_table[(current[1] & 0x0f) << 2];
*p++ = base64_pad;
} else {
*p++ = base64_table[(current[0] & 0x03) << 4];
*p++ = base64_pad;
*p++ = base64_pad;
}
}
ret.setSize(p - result);
return ret;
}
static String php_base64_decode(const char *str, int length, bool strict) {
const unsigned char *current = (unsigned char*)str;
int ch, i = 0, j = 0, k;
/* this sucks for threaded environments */
String retString(length, ReserveString);
unsigned char* result = (unsigned char*)retString.bufferSlice().ptr;
/* run through the whole string, converting as we go */
while ((ch = *current++) != '\0' && length-- > 0) {
if (ch == base64_pad) {
if (*current != '=' && ((i % 4) == 1 || (strict && length > 0))) {
if ((i % 4) != 1) {
while (isspace(*(++current))) {
continue;
}
if (*current == '\0') {
continue;
}
}
return String();
}
continue;
}
ch = base64_reverse_table[ch];
if ((!strict && ch < 0) || ch == -1) {
/* a space or some other separator character, we simply skip over */
continue;
} else if (ch == -2) {
return String();
}
switch(i % 4) {
case 0:
result[j] = ch << 2;
break;
case 1:
result[j++] |= ch >> 4;
result[j] = (ch & 0x0f) << 4;
break;
case 2:
result[j++] |= ch >>2;
result[j] = (ch & 0x03) << 6;
break;
case 3:
result[j++] |= ch;
break;
}
i++;
}
k = j;
/* mop things up if we ended on a boundary */
if (ch == base64_pad) {
switch(i % 4) {
case 1:
return String();
case 2:
k++;
case 3:
result[k] = 0;
}
}
retString.setSize(j);
return retString;
}
String string_base64_encode(const char *input, int len) {
return php_base64_encode((unsigned char *)input, len);
}
String string_base64_decode(const char *input, int len, bool strict) {
return php_base64_decode(input, len, strict);
}
///////////////////////////////////////////////////////////////////////////////
String string_escape_shell_arg(const char *str) {
int x, y, l;
char *cmd;
y = 0;
l = strlen(str);
String ret(safe_address(l, 4, 3), ReserveString); /* worst case */
cmd = ret.bufferSlice().ptr;
cmd[y++] = '\'';
for (x = 0; x < l; x++) {
switch (str[x]) {
case '\'':
cmd[y++] = '\'';
cmd[y++] = '\\';
cmd[y++] = '\'';
/* fall-through */
default:
cmd[y++] = str[x];
}
}
cmd[y++] = '\'';
ret.setSize(y);
return ret;
}
String string_escape_shell_cmd(const char *str) {
register int x, y, l;
char *cmd;
char *p = nullptr;
l = strlen(str);
String ret(safe_address(l, 2, 1), ReserveString);
cmd = ret.bufferSlice().ptr;
for (x = 0, y = 0; x < l; x++) {
switch (str[x]) {
case '"':
case '\'':
if (!p && (p = (char *)memchr(str + x + 1, str[x], l - x - 1))) {
/* noop */
} else if (p && *p == str[x]) {
p = nullptr;
} else {
cmd[y++] = '\\';
}
cmd[y++] = str[x];
break;
case '#': /* This is character-set independent */
case '&':
case ';':
case '`':
case '|':
case '*':
case '?':
case '~':
case '<':
case '>':
case '^':
case '(':
case ')':
case '[':
case ']':
case '{':
case '}':
case '$':
case '\\':
case '\x0A': /* excluding these two */
case '\xFF':
cmd[y++] = '\\';
/* fall-through */
default:
cmd[y++] = str[x];
}
}
ret.setSize(y);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
static void string_similar_str(const char *txt1, int len1,
const char *txt2, int len2,
int *pos1, int *pos2, int *max) {
const char *p, *q;
const char *end1 = txt1 + len1;
const char *end2 = txt2 + len2;
int l;
*max = 0;
for (p = txt1; p < end1; p++) {
for (q = txt2; q < end2; q++) {
for (l = 0; (p + l < end1) && (q + l < end2) && (p[l] == q[l]); l++);
if (l > *max) {
*max = l;
*pos1 = p - txt1;
*pos2 = q - txt2;
}
}
}
}
static int string_similar_char(const char *txt1, int len1,
const char *txt2, int len2) {
int sum;
int pos1 = 0, pos2 = 0, max;
string_similar_str(txt1, len1, txt2, len2, &pos1, &pos2, &max);
if ((sum = max)) {
if (pos1 && pos2) {
sum += string_similar_char(txt1, pos1, txt2, pos2);
}
if ((pos1 + max < len1) && (pos2 + max < len2)) {
sum += string_similar_char(txt1 + pos1 + max, len1 - pos1 - max,
txt2 + pos2 + max, len2 - pos2 - max);
}
}
return sum;
}
int string_similar_text(const char *t1, int len1,
const char *t2, int len2, float *percent) {
if (len1 == 0 && len2 == 0) {
if (percent) *percent = 0.0;
return 0;
}
int sim = string_similar_char(t1, len1, t2, len2);
if (percent) *percent = sim * 200.0 / (len1 + len2);
return sim;
}
///////////////////////////////////////////////////////////////////////////////
#define LEVENSHTEIN_MAX_LENTH 255
// reference implementation, only optimized for memory usage, not speed
int string_levenshtein(const char *s1, int l1, const char *s2, int l2,
int cost_ins, int cost_rep, int cost_del ) {
int *p1, *p2, *tmp;
int i1, i2, c0, c1, c2;
if (l1==0) return l2*cost_ins;
if (l2==0) return l1*cost_del;
if ((l1>LEVENSHTEIN_MAX_LENTH)||(l2>LEVENSHTEIN_MAX_LENTH)) {
raise_warning("levenshtein(): Argument string(s) too long");
return -1;
}
p1 = (int*)smart_malloc((l2+1) * sizeof(int));
p2 = (int*)smart_malloc((l2+1) * sizeof(int));
for(i2=0;i2<=l2;i2++) {
p1[i2] = i2*cost_ins;
}
for(i1=0;i1<l1;i1++) {
p2[0]=p1[0]+cost_del;
for(i2=0;i2<l2;i2++) {
c0=p1[i2]+((s1[i1]==s2[i2])?0:cost_rep);
c1=p1[i2+1]+cost_del; if (c1<c0) c0=c1;
c2=p2[i2]+cost_ins; if (c2<c0) c0=c2;
p2[i2+1]=c0;
}
tmp=p1; p1=p2; p2=tmp;
}
c0=p1[l2];
smart_free(p1);
smart_free(p2);
return c0;
}
///////////////////////////////////////////////////////////////////////////////
String string_money_format(const char *format, double value) {
bool check = false;
const char *p = format;
while ((p = strchr(p, '%'))) {
if (*(p + 1) == '%') {
p += 2;
} else if (!check) {
check = true;
p++;
} else {
throw_invalid_argument
("format: Only a single %%i or %%n token can be used");
return String();
}
}
int format_len = strlen(format);
int str_len = safe_address(format_len, 1, 1024);
String ret(str_len, ReserveString);
char *str = ret.bufferSlice().ptr;
if ((str_len = strfmon(str, str_len, format, value)) < 0) {
return String();
}
ret.setSize(str_len);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
String string_number_format(double d, int dec,
const String& dec_point,
const String& thousand_sep) {
char *tmpbuf = nullptr, *resbuf;
char *s, *t; /* source, target */
char *dp;
int integral;
int tmplen, reslen=0;
int count=0;
int is_negative=0;
if (d < 0) {
is_negative = 1;
d = -d;
}
if (dec < 0) dec = 0;
d = php_math_round(d, dec);
// departure from PHP: we got rid of dependencies on spprintf() here.
String tmpstr(63, ReserveString);
tmpbuf = tmpstr.bufferSlice().ptr;
snprintf(tmpbuf, 64, "%.*F", dec, d);
tmplen = strlen(tmpbuf);
if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) {
tmpstr.setSize(tmplen);
return tmpstr;
}
/* find decimal point, if expected */
if (dec) {
dp = strpbrk(tmpbuf, ".,");
} else {
dp = nullptr;
}
/* calculate the length of the return buffer */
if (dp) {
integral = dp - tmpbuf;
} else {
/* no decimal point was found */
integral = tmplen;
}
/* allow for thousand separators */
if (!thousand_sep.empty()) {
integral += ((integral-1) / 3) * thousand_sep.size();
}
reslen = integral;
if (dec) {
reslen += dec;
if (!dec_point.empty()) {
reslen += dec_point.size();
}
}
/* add a byte for minus sign */
if (is_negative) {
reslen++;
}
String resstr(reslen, ReserveString);
resbuf = resstr.bufferSlice().ptr;
s = tmpbuf+tmplen-1;
t = resbuf+reslen-1;
/* copy the decimal places.
* Take care, as the sprintf implementation may return less places than
* we requested due to internal buffer limitations */
if (dec) {
int declen = dp ? s - dp : 0;
int topad = dec > declen ? dec - declen : 0;
/* pad with '0's */
while (topad--) {
*t-- = '0';
}
if (dp) {
s -= declen + 1; /* +1 to skip the point */
t -= declen;
/* now copy the chars after the point */
memcpy(t + 1, dp + 1, declen);
}
/* add decimal point */
if (!dec_point.empty()) {
memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size());
t -= dec_point.size();
}
}
/* copy the numbers before the decimal point, adding thousand
* separator every three digits */
while(s >= tmpbuf) {
*t-- = *s--;
if (thousand_sep && (++count%3)==0 && s>=tmpbuf) {
memcpy(t + (1 - thousand_sep.size()),
thousand_sep.data(),
thousand_sep.size());
t -= thousand_sep.size();
}
}
/* and a minus sign, if needed */
if (is_negative) {
*t-- = '-';
}
resstr.setSize(reslen);
return resstr;
}
///////////////////////////////////////////////////////////////////////////////
// soundex
/* Simple soundex algorithm as described by Knuth in TAOCP, vol 3 */
String string_soundex(const String& str) {
assert(!str.empty());
int _small, code, last;
String retString(4, ReserveString);
char* soundex = retString.bufferSlice().ptr;
static char soundex_table[26] = {
0, /* A */
'1', /* B */
'2', /* C */
'3', /* D */
0, /* E */
'1', /* F */
'2', /* G */
0, /* H */
0, /* I */
'2', /* J */
'2', /* K */
'4', /* L */
'5', /* M */
'5', /* N */
0, /* O */
'1', /* P */
'2', /* Q */
'6', /* R */
'2', /* S */
'3', /* T */
0, /* U */
'1', /* V */
0, /* W */
'2', /* X */
0, /* Y */
'2' /* Z */
};
/* build soundex string */
last = -1;
const char *p = str.slice().ptr;
for (_small = 0; *p && _small < 4; p++) {
/* convert chars to upper case and strip non-letter chars */
/* BUG: should also map here accented letters used in non */
/* English words or names (also found in English text!): */
/* esstsett, thorn, n-tilde, c-cedilla, s-caron, ... */
code = toupper((int)(unsigned char)(*p));
if (code >= 'A' && code <= 'Z') {
if (_small == 0) {
/* remember first valid char */
soundex[_small++] = code;
last = soundex_table[code - 'A'];
} else {
/* ignore sequences of consonants with same soundex */
/* code in trail, and vowels unless they separate */
/* consonant letters */
code = soundex_table[code - 'A'];
if (code != last) {
if (code != 0) {
soundex[_small++] = code;
}
last = code;
}
}
}
}
/* pad with '0' and terminate with 0 ;-) */
while (_small < 4) {
soundex[_small++] = '0';
}
retString.setSize(4);
return retString;
}
///////////////////////////////////////////////////////////////////////////////
// metaphone
/**
* this is now the original code by Michael G Schwern:
* i've changed it just a slightly bit (use emalloc,
* get rid of includes etc)
* - thies - 13.09.1999
*/
/*----------------------------- */
/* this used to be "metaphone.h" */
/*----------------------------- */
/* Special encodings */
#define SH 'X'
#define TH '0'
/*----------------------------- */
/* end of "metaphone.h" */
/*----------------------------- */
/*----------------------------- */
/* this used to be "metachar.h" */
/*----------------------------- */
/* Metachar.h ... little bits about characters for metaphone */
/*-- Character encoding array & accessing macros --*/
/* Stolen directly out of the book... */
char _codes[26] = { 1,16,4,16,9,2,4,16,9,2,0,2,2,2,1,4,0,2,4,4,1,0,0,0,8,0};
#define ENCODE(c) (isalpha(c) ? _codes[((toupper(c)) - 'A')] : 0)
#define isvowel(c) (ENCODE(c) & 1) /* AEIOU */
/* These letters are passed through unchanged */
#define NOCHANGE(c) (ENCODE(c) & 2) /* FJMNR */
/* These form dipthongs when preceding H */
#define AFFECTH(c) (ENCODE(c) & 4) /* CGPST */
/* These make C and G soft */
#define MAKESOFT(c) (ENCODE(c) & 8) /* EIY */
/* These prevent GH from becoming F */
#define NOGHTOF(c) (ENCODE(c) & 16) /* BDH */
/*----------------------------- */
/* end of "metachar.h" */
/*----------------------------- */
/* I suppose I could have been using a character pointer instead of
* accesssing the array directly... */
/* Look at the next letter in the word */
#define Next_Letter ((char)toupper(word[w_idx+1]))
/* Look at the current letter in the word */
#define Curr_Letter ((char)toupper(word[w_idx]))
/* Go N letters back. */
#define Look_Back_Letter(n) (w_idx >= n ? (char)toupper(word[w_idx-n]) : '\0')
/* Previous letter. I dunno, should this return null on failure? */
#define Prev_Letter (Look_Back_Letter(1))
/* Look two letters down. It makes sure you don't walk off the string. */
#define After_Next_Letter (Next_Letter != '\0' ? (char)toupper(word[w_idx+2]) \
: '\0')
#define Look_Ahead_Letter(n) ((char)toupper(Lookahead(word+w_idx, n)))
/* Allows us to safely look ahead an arbitrary # of letters */
/* I probably could have just used strlen... */
static char Lookahead(unsigned char *word, int how_far) {
char letter_ahead = '\0'; /* null by default */
int idx;
for (idx = 0; word[idx] != '\0' && idx < how_far; idx++);
/* Edge forward in the string... */
letter_ahead = (char)word[idx]; /* idx will be either == to how_far or
* at the end of the string
*/
return letter_ahead;
}
/* phonize one letter
* We don't know the buffers size in advance. On way to solve this is to just
* re-allocate the buffer size. We're using an extra of 2 characters (this
* could be one though; or more too). */
#define Phonize(c) { buffer.append(c); }
/* How long is the phoned word? */
#define Phone_Len (buffer.size())
/* Note is a letter is a 'break' in the word */
#define Isbreak(c) (!isalpha(c))
String string_metaphone(const char *input, int word_len, long max_phonemes,
int traditional) {
unsigned char *word = (unsigned char *)input;
int w_idx = 0; /* point in the phonization we're at. */
int max_buffer_len = 0; /* maximum length of the destination buffer */
/*-- Parameter checks --*/
/* Negative phoneme length is meaningless */
if (max_phonemes < 0)
return String();
/* Empty/null string is meaningless */
/* Overly paranoid */
/* always_assert(word != NULL && word[0] != '\0'); */
if (word == nullptr)
return String();
/*-- Allocate memory for our phoned_phrase --*/
if (max_phonemes == 0) { /* Assume largest possible */
max_buffer_len = word_len;
} else {
max_buffer_len = max_phonemes;
}
StringBuffer buffer(max_buffer_len);
/*-- The first phoneme has to be processed specially. --*/
/* Find our first letter */
for (; !isalpha(Curr_Letter); w_idx++) {
/* On the off chance we were given nothing but crap... */
if (Curr_Letter == '\0') {
return buffer.detach(); /* For testing */
}
}
switch (Curr_Letter) {
/* AE becomes E */
case 'A':
if (Next_Letter == 'E') {
Phonize('E');
w_idx += 2;
}
/* Remember, preserve vowels at the beginning */
else {
Phonize('A');
w_idx++;
}
break;
/* [GKP]N becomes N */
case 'G':
case 'K':
case 'P':
if (Next_Letter == 'N') {
Phonize('N');
w_idx += 2;
}
break;
/* WH becomes H,
WR becomes R
W if followed by a vowel */
case 'W':
if (Next_Letter == 'H' ||
Next_Letter == 'R') {
Phonize(Next_Letter);
w_idx += 2;
} else if (isvowel(Next_Letter)) {
Phonize('W');
w_idx += 2;
}
/* else ignore */
break;
/* X becomes S */
case 'X':
Phonize('S');
w_idx++;
break;
/* Vowels are kept */
/* We did A already
case 'A':
case 'a':
*/
case 'E':
case 'I':
case 'O':
case 'U':
Phonize(Curr_Letter);
w_idx++;
break;
default:
/* do nothing */
break;
}
/* On to the metaphoning */
for (; Curr_Letter != '\0' &&
(max_phonemes == 0 || Phone_Len < max_phonemes);
w_idx++) {
/* How many letters to skip because an eariler encoding handled
* multiple letters */
unsigned short int skip_letter = 0;
/* THOUGHT: It would be nice if, rather than having things like...
* well, SCI. For SCI you encode the S, then have to remember
* to skip the C. So the phonome SCI invades both S and C. It would
* be better, IMHO, to skip the C from the S part of the encoding.
* Hell, I'm trying it.
*/
/* Ignore non-alphas */
if (!isalpha(Curr_Letter))
continue;
/* Drop duplicates, except CC */
if (Curr_Letter == Prev_Letter &&
Curr_Letter != 'C')
continue;
switch (Curr_Letter) {
/* B -> B unless in MB */
case 'B':
if (Prev_Letter != 'M')
Phonize('B');
break;
/* 'sh' if -CIA- or -CH, but not SCH, except SCHW.
* (SCHW is handled in S)
* S if -CI-, -CE- or -CY-
* dropped if -SCI-, SCE-, -SCY- (handed in S)
* else K
*/
case 'C':
if (MAKESOFT(Next_Letter)) { /* C[IEY] */
if (After_Next_Letter == 'A' &&
Next_Letter == 'I') { /* CIA */
Phonize(SH);
}
/* SC[IEY] */
else if (Prev_Letter == 'S') {
/* Dropped */
} else {
Phonize('S');
}
} else if (Next_Letter == 'H') {
if ((!traditional) && (After_Next_Letter == 'R' ||
Prev_Letter == 'S')) { /* Christ, School */
Phonize('K');
} else {
Phonize(SH);
}
skip_letter++;
} else {
Phonize('K');
}
break;
/* J if in -DGE-, -DGI- or -DGY-
* else T
*/
case 'D':
if (Next_Letter == 'G' && MAKESOFT(After_Next_Letter)) {
Phonize('J');
skip_letter++;
} else
Phonize('T');
break;
/* F if in -GH and not B--GH, D--GH, -H--GH, -H---GH
* else dropped if -GNED, -GN,
* else dropped if -DGE-, -DGI- or -DGY- (handled in D)
* else J if in -GE-, -GI, -GY and not GG
* else K
*/
case 'G':
if (Next_Letter == 'H') {
if (!(NOGHTOF(Look_Back_Letter(3)) || Look_Back_Letter(4) == 'H')) {
Phonize('F');
skip_letter++;
} else {
/* silent */
}
} else if (Next_Letter == 'N') {
if (Isbreak(After_Next_Letter) ||
(After_Next_Letter == 'E' && Look_Ahead_Letter(3) == 'D')) {
/* dropped */
} else
Phonize('K');
} else if (MAKESOFT(Next_Letter) && Prev_Letter != 'G') {
Phonize('J');
} else {
Phonize('K');
}
break;
/* H if before a vowel and not after C,G,P,S,T */
case 'H':
if (isvowel(Next_Letter) && !AFFECTH(Prev_Letter))
Phonize('H');
break;
/* dropped if after C
* else K
*/
case 'K':
if (Prev_Letter != 'C')
Phonize('K');
break;
/* F if before H
* else P
*/
case 'P':
if (Next_Letter == 'H') {
Phonize('F');
} else {
Phonize('P');
}
break;
/* K
*/
case 'Q':
Phonize('K');
break;
/* 'sh' in -SH-, -SIO- or -SIA- or -SCHW-
* else S
*/
case 'S':
if (Next_Letter == 'I' &&
(After_Next_Letter == 'O' || After_Next_Letter == 'A')) {
Phonize(SH);
} else if (Next_Letter == 'H') {
Phonize(SH);
skip_letter++;
} else if ((!traditional) &&
(Next_Letter == 'C' && Look_Ahead_Letter(2) == 'H' &&
Look_Ahead_Letter(3) == 'W')) {
Phonize(SH);
skip_letter += 2;
} else {
Phonize('S');
}
break;
/* 'sh' in -TIA- or -TIO-
* else 'th' before H
* else T
*/
case 'T':
if (Next_Letter == 'I' &&
(After_Next_Letter == 'O' || After_Next_Letter == 'A')) {
Phonize(SH);
} else if (Next_Letter == 'H') {
Phonize(TH);
skip_letter++;
} else {
Phonize('T');
}
break;
/* F */
case 'V':
Phonize('F');
break;
/* W before a vowel, else dropped */
case 'W':
if (isvowel(Next_Letter))
Phonize('W');
break;
/* KS */
case 'X':
Phonize('K');
Phonize('S');
break;
/* Y if followed by a vowel */
case 'Y':
if (isvowel(Next_Letter))
Phonize('Y');
break;
/* S */
case 'Z':
Phonize('S');
break;
/* No transformation */
case 'F':
case 'J':
case 'L':
case 'M':
case 'N':
case 'R':
Phonize(Curr_Letter);
break;
default:
/* nothing */
break;
} /* END SWITCH */
w_idx += skip_letter;
} /* END FOR */
return buffer.detach();
}
///////////////////////////////////////////////////////////////////////////////
// Cyrillic
/**
* This is codetables for different Cyrillic charsets (relative to koi8-r).
* Each table contains data for 128-255 symbols from ASCII table.
* First 256 symbols are for conversion from koi8-r to corresponding charset,
* second 256 symbols are for reverse conversion, from charset to koi8-r.
*
* Here we have the following tables:
* _cyr_win1251 - for windows-1251 charset
* _cyr_iso88595 - for iso8859-5 charset
* _cyr_cp866 - for x-cp866 charset
* _cyr_mac - for x-mac-cyrillic charset
*/
typedef unsigned char _cyr_charset_table[512];
static const _cyr_charset_table _cyr_win1251 = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,
46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,
154,174,190,46,159,189,46,46,179,191,180,157,46,46,156,183,
46,46,182,166,173,46,46,158,163,152,164,155,46,46,46,167,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,184,186,32,179,191,32,32,32,32,32,180,162,32,
32,32,32,168,170,32,178,175,32,32,32,32,32,165,161,169,
254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238,
239,255,240,241,242,243,230,226,252,251,231,248,253,249,247,250,
222,192,193,214,196,197,212,195,213,200,201,202,203,204,205,206,
207,223,208,209,210,211,198,194,220,219,199,216,221,217,215,218,
};
static const _cyr_charset_table _cyr_cp866 = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
35,35,35,124,124,124,124,43,43,124,124,43,43,43,43,43,
43,45,45,124,45,43,124,124,43,43,45,45,124,45,43,45,
45,45,45,43,43,43,43,43,43,43,43,35,35,124,124,35,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209,
179,163,180,164,183,167,190,174,32,149,158,32,152,159,148,154,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
205,186,213,241,243,201,32,245,187,212,211,200,190,32,247,198,
199,204,181,240,242,185,32,244,203,207,208,202,216,32,246,32,
238,160,161,230,164,165,228,163,229,168,169,170,171,172,173,174,
175,239,224,225,226,227,166,162,236,235,167,232,237,233,231,234,
158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142,
143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154,
};
static const _cyr_charset_table _cyr_iso88595 = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,179,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209,
32,163,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,241,32,32,32,32,32,32,32,32,32,32,32,32,
32,32,32,161,32,32,32,32,32,32,32,32,32,32,32,32,
238,208,209,230,212,213,228,211,229,216,217,218,219,220,221,222,
223,239,224,225,226,227,214,210,236,235,215,232,237,233,231,234,
206,176,177,198,180,181,196,179,197,184,185,186,187,188,189,190,
191,207,192,193,194,195,182,178,204,203,183,200,205,201,199,202,
};
static const _cyr_charset_table _cyr_mac = {
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240,
242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241,
160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
144,145,146,147,148,149,150,151,152,153,154,155,156,179,163,209,
193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208,
210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,255,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
160,161,162,222,164,165,166,167,168,169,170,171,172,173,174,175,
176,177,178,221,180,181,182,183,184,185,186,187,188,189,190,191,
254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238,
239,223,240,241,242,243,230,226,252,251,231,248,253,249,247,250,
158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142,
143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154,
};
/**
* This is the function that performs real in-place conversion of the string
* between charsets.
* Parameters:
* str - string to be converted
* from,to - one-symbol label of source and destination charset
* The following symbols are used as labels:
* k - koi8-r
* w - windows-1251
* i - iso8859-5
* a - x-cp866
* d - x-cp866
* m - x-mac-cyrillic
*/
String string_convert_cyrillic_string(const String& input, char from, char to) {
const unsigned char *from_table, *to_table;
unsigned char tmp;
const unsigned char *uinput = (unsigned char *)input.slice().ptr;
String retString(input.size(), ReserveString);
unsigned char *str = (unsigned char *)retString.bufferSlice().ptr;
from_table = nullptr;
to_table = nullptr;
switch (toupper((int)(unsigned char)from)) {
case 'W': from_table = _cyr_win1251; break;
case 'A':
case 'D': from_table = _cyr_cp866; break;
case 'I': from_table = _cyr_iso88595; break;
case 'M': from_table = _cyr_mac; break;
case 'K':
break;
default:
throw_invalid_argument("Unknown source charset: %c", from);
break;
}
switch (toupper((int)(unsigned char)to)) {
case 'W': to_table = _cyr_win1251; break;
case 'A':
case 'D': to_table = _cyr_cp866; break;
case 'I': to_table = _cyr_iso88595; break;
case 'M': to_table = _cyr_mac; break;
case 'K':
break;
default:
throw_invalid_argument("Unknown destination charset: %c", to);
break;
}
for (int i = 0; i < input.size(); i++) {
tmp = from_table == nullptr ? uinput[i] : from_table[uinput[i]];
str[i] = to_table == nullptr ? tmp : to_table[tmp + 256];
}
retString.setSize(input.size());
return retString;
}
///////////////////////////////////////////////////////////////////////////////
// Hebrew
#define HEB_BLOCK_TYPE_ENG 1
#define HEB_BLOCK_TYPE_HEB 2
#define isheb(c) \
(((((unsigned char) c) >= 224) && (((unsigned char) c) <= 250)) ? 1 : 0)
#define _isblank(c) \
(((((unsigned char) c) == ' ' || ((unsigned char) c) == '\t')) ? 1 : 0)
#define _isnewline(c) \
(((((unsigned char) c) == '\n' || ((unsigned char) c) == '\r')) ? 1 : 0)
/**
* Converts Logical Hebrew text (Hebrew Windows style) to Visual text
* Cheers/complaints/flames - Zeev Suraski <zeev@php.net>
*/
String string_convert_hebrew_string(const String& inStr,
int max_chars_per_line,
int convert_newlines) {
assert(!inStr.empty());
auto str = inStr.data();
auto str_len = inStr.size();
const char *tmp;
char *heb_str, *broken_str;
char *target;
int block_start, block_end, block_type, block_length, i;
long max_chars=0;
int begin, end, char_count, orig_begin;
tmp = str;
block_start=block_end=0;
heb_str = (char *) smart_malloc(str_len + 1);
SCOPE_EXIT { smart_free(heb_str); };
target = heb_str+str_len;
*target = 0;
target--;
block_length=0;
if (isheb(*tmp)) {
block_type = HEB_BLOCK_TYPE_HEB;
} else {
block_type = HEB_BLOCK_TYPE_ENG;
}
do {
if (block_type == HEB_BLOCK_TYPE_HEB) {
while ((isheb((int)*(tmp+1)) ||
_isblank((int)*(tmp+1)) ||
ispunct((int)*(tmp+1)) ||
(int)*(tmp+1)=='\n' ) && block_end<str_len-1) {
tmp++;
block_end++;
block_length++;
}
for (i = block_start; i<= block_end; i++) {
*target = str[i];
switch (*target) {
case '(': *target = ')'; break;
case ')': *target = '('; break;
case '[': *target = ']'; break;
case ']': *target = '['; break;
case '{': *target = '}'; break;
case '}': *target = '{'; break;
case '<': *target = '>'; break;
case '>': *target = '<'; break;
case '\\': *target = '/'; break;
case '/': *target = '\\'; break;
default:
break;
}
target--;
}
block_type = HEB_BLOCK_TYPE_ENG;
} else {
while (!isheb(*(tmp+1)) &&
(int)*(tmp+1)!='\n' && block_end < str_len-1) {
tmp++;
block_end++;
block_length++;
}
while ((_isblank((int)*tmp) ||
ispunct((int)*tmp)) && *tmp!='/' &&
*tmp!='-' && block_end > block_start) {
tmp--;
block_end--;
}
for (i = block_end; i >= block_start; i--) {
*target = str[i];
target--;
}
block_type = HEB_BLOCK_TYPE_HEB;
}
block_start=block_end+1;
} while (block_end < str_len-1);
String brokenStr(str_len, ReserveString);
broken_str = brokenStr.bufferSlice().ptr;
begin=end=str_len-1;
target = broken_str;
while (1) {
char_count=0;
while ((!max_chars || char_count < max_chars) && begin > 0) {
char_count++;
begin--;
if (begin <= 0 || _isnewline(heb_str[begin])) {
while (begin > 0 && _isnewline(heb_str[begin-1])) {
begin--;
char_count++;
}
break;
}
}
if (char_count == max_chars) { /* try to avoid breaking words */
int new_char_count=char_count, new_begin=begin;
while (new_char_count > 0) {
if (_isblank(heb_str[new_begin]) || _isnewline(heb_str[new_begin])) {
break;
}
new_begin++;
new_char_count--;
}
if (new_char_count > 0) {
char_count=new_char_count;
begin=new_begin;
}
}
orig_begin=begin;
if (_isblank(heb_str[begin])) {
heb_str[begin]='\n';
}
while (begin <= end && _isnewline(heb_str[begin])) {
/* skip leading newlines */
begin++;
}
for (i = begin; i <= end; i++) { /* copy content */
*target = heb_str[i];
target++;
}
for (i = orig_begin; i <= end && _isnewline(heb_str[i]); i++) {
*target = heb_str[i];
target++;
}
begin=orig_begin;
if (begin <= 0) {
*target = 0;
break;
}
begin--;
end=begin;
}
if (convert_newlines) {
int count;
auto ret = string_replace(broken_str, str_len, "\n", strlen("\n"),
"<br />\n", strlen("<br />\n"), count, true);
if (!ret.isNull()) {
return ret;
}
}
brokenStr.setSize(str_len);
return brokenStr;
}
#if defined(__APPLE__)
void *memrchr(const void *s, int c, size_t n) {
for (const char *p = (const char *)s + n - 1; p >= s; p--) {
if (*p == c) return (void *)p;
}
return nullptr;
}
#endif
///////////////////////////////////////////////////////////////////////////////
}
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/bad_2263_0 |
crossvul-cpp_data_bad_1605_3 | /*
Copyright 2008-2013 LibRaw LLC (info@libraw.org)
LibRaw is free software; you can redistribute it and/or modify
it under the terms of the one of three licenses as you choose:
1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1
(See file LICENSE.LGPL provided in LibRaw distribution archive for details).
2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
(See file LICENSE.CDDL provided in LibRaw distribution archive for details).
3. LibRaw Software License 27032010
(See file LICENSE.LibRaw.pdf provided in LibRaw distribution archive for details).
This file is generated from Dave Coffin's dcraw.c
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2010 by Dave Coffin, dcoffin a cybercom o net
Look into dcraw homepage (probably http://cybercom.net/~dcoffin/dcraw/)
for more information
*/
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#define LIBRAW_IO_REDEFINED
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
int CLASS fcol (int row, int col)
{
static const char filter[16][16] =
{ { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 },
{ 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 },
{ 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 },
{ 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 },
{ 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 },
{ 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 },
{ 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 },
{ 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 },
{ 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 },
{ 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 },
{ 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 },
{ 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 },
{ 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 },
{ 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 },
{ 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 },
{ 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } };
if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15];
if (filters == 9) return xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6];
return FC(row,col);
}
#ifndef __GLIBC__
char *my_memmem (char *haystack, size_t haystacklen,
char *needle, size_t needlelen)
{
char *c;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp (c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
char *my_strcasestr (char *haystack, const char *needle)
{
char *c;
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, strlen(needle)))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
ushort CLASS sget2 (uchar *s)
{
if (order == 0x4949) /* "II" means little-endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian */
return s[0] << 8 | s[1];
}
ushort CLASS get2()
{
uchar str[2] = { 0xff,0xff };
fread (str, 1, 2, ifp);
return sget2(str);
}
unsigned CLASS sget4 (uchar *s)
{
if (order == 0x4949)
return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
else
return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
#define sget4(s) sget4((uchar *)s)
unsigned CLASS get4()
{
uchar str[4] = { 0xff,0xff,0xff,0xff };
fread (str, 1, 4, ifp);
return sget4(str);
}
unsigned CLASS getint (int type)
{
return type == 3 ? get2() : get4();
}
float CLASS int_to_float (int i)
{
union { int i; float f; } u;
u.i = i;
return u.f;
}
double CLASS getreal (int type)
{
union { char c[8]; double d; } u;
int i, rev;
switch (type) {
case 3: return (unsigned short) get2();
case 4: return (unsigned int) get4();
case 5: u.d = (unsigned int) get4();
return u.d / (unsigned int) get4();
case 8: return (signed short) get2();
case 9: return (signed int) get4();
case 10: u.d = (signed int) get4();
return u.d / (signed int) get4();
case 11: return int_to_float (get4());
case 12:
rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
for (i=0; i < 8; i++)
u.c[i ^ rev] = fgetc(ifp);
return u.d;
default: return fgetc(ifp);
}
}
void CLASS read_shorts (ushort *pixel, int count)
{
if (fread (pixel, 2, count, ifp) < count) derror();
if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
swab ((char*)pixel, (char*)pixel, count*2);
}
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white */
int CLASS canon_600_color (int ratio[2], int mar)
{
int clipped=0, target, miss;
if (flash_used) {
if (ratio[1] < -104)
{ ratio[1] = -104; clipped = 1; }
if (ratio[1] > 12)
{ ratio[1] = 12; clipped = 1; }
} else {
if (ratio[1] < -264 || ratio[1] > 461) return 2;
if (ratio[1] < -50)
{ ratio[1] = -50; clipped = 1; }
if (ratio[1] > 307)
{ ratio[1] = 307; clipped = 1; }
}
target = flash_used || ratio[1] < 197
? -38 - (398 * ratio[1] >> 10)
: -123 + (48 * ratio[1] >> 10);
if (target - mar <= ratio[0] &&
target + 20 >= ratio[0] && !clipped) return 0;
miss = target - ratio[0];
if (abs(miss) >= mar*4) return 2;
if (miss < -20) miss = -20;
if (miss > mar) miss = mar;
ratio[0] = target - miss;
return 1;
}
void CLASS canon_600_auto_wb()
{
int mar, row, col, i, j, st, count[] = { 0,0 };
int test[8], total[2][8], ratio[2][2], stat[2];
memset (&total, 0, sizeof total);
i = canon_ev + 0.5;
if (i < 10) mar = 150;
else if (i > 12) mar = 20;
else mar = 280 - 20 * i;
if (flash_used) mar = 80;
for (row=14; row < height-14; row+=4)
for (col=10; col < width; col+=2) {
for (i=0; i < 8; i++)
test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] =
BAYER(row+(i >> 1),col+(i & 1));
for (i=0; i < 8; i++)
if (test[i] < 150 || test[i] > 1500) goto next;
for (i=0; i < 4; i++)
if (abs(test[i] - test[i+4]) > 50) goto next;
for (i=0; i < 2; i++) {
for (j=0; j < 4; j+=2)
ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j];
stat[i] = canon_600_color (ratio[i], mar);
}
if ((st = stat[0] | stat[1]) > 1) goto next;
for (i=0; i < 2; i++)
if (stat[i])
for (j=0; j < 2; j++)
test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10;
for (i=0; i < 8; i++)
total[st][i] += test[i];
count[st]++;
next: ;
}
if (count[0] | count[1]) {
st = count[0]*200 < count[1];
for (i=0; i < 4; i++)
pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]);
}
}
void CLASS canon_600_coeff()
{
static const short table[6][12] = {
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 },
{ -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 },
{ -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 },
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } };
int t=0, i, c;
float mc, yc;
mc = pre_mul[1] / pre_mul[2];
yc = pre_mul[3] / pre_mul[2];
if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1;
if (mc > 1.28 && mc <= 2) {
if (yc < 0.8789) t=3;
else if (yc <= 2) t=4;
}
if (flash_used) t=5;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0;
}
void CLASS canon_600_load_raw()
{
uchar data[1120], *dp;
ushort *pix;
int irow, row;
for (irow=row=0; irow < height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data, 1, 1120, ifp) < 1120) derror();
pix = raw_image + row*raw_width;
for (dp=data; dp < data+1120; dp+=10, pix+=8) {
pix[0] = (dp[0] << 2) + (dp[1] >> 6 );
pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
pix[3] = (dp[4] << 2) + (dp[1] & 3);
pix[4] = (dp[5] << 2) + (dp[9] & 3);
pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
pix[7] = (dp[8] << 2) + (dp[9] >> 6 );
}
if ((row+=2) > height) row = 1;
}
}
void CLASS canon_600_correct()
{
int row, col, val;
static const short mul[4][2] =
{ { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } };
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
if ((val = BAYER(row,col) - black) < 0) val = 0;
val = val * mul[row & 3][col & 1] >> 9;
BAYER(row,col) = val;
}
}
canon_600_fixed_wb(1311);
canon_600_auto_wb();
canon_600_coeff();
maximum = (0x3ff - black) * 1109 >> 9;
black = 0;
}
int CLASS canon_s2is()
{
unsigned row;
for (row=0; row < 100; row++) {
fseek (ifp, row*3340 + 3284, SEEK_SET);
if (getc(ifp) > 15) return 1;
}
return 0;
}
unsigned CLASS getbithuff (int nbits, ushort *huff)
{
#ifdef LIBRAW_NOTHREADS
static unsigned bitbuf=0;
static int vbits=0, reset=0;
#else
#define bitbuf tls->getbits.bitbuf
#define vbits tls->getbits.vbits
#define reset tls->getbits.reset
#endif
unsigned c;
if (nbits > 25) return 0;
if (nbits < 0)
return bitbuf = vbits = reset = 0;
if (nbits == 0 || vbits < 0) return 0;
while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF &&
!(reset = zero_after_ff && c == 0xff && fgetc(ifp))) {
bitbuf = (bitbuf << 8) + (uchar) c;
vbits += 8;
}
c = bitbuf << (32-vbits) >> (32-nbits);
if (huff) {
vbits -= huff[c] >> 8;
c = (uchar) huff[c];
} else
vbits -= nbits;
if (vbits < 0) derror();
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#undef reset
#endif
}
#define getbits(n) getbithuff(n,0)
#define gethuff(h) getbithuff(*h,h+1)
/*
Construct a decode tree according the specification in *source.
The first 16 bytes specify how many codes should be 1-bit, 2-bit
3-bit, etc. Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
ushort * CLASS make_decoder_ref (const uchar **source)
{
int max, len, h, i, j;
const uchar *count;
ushort *huff;
count = (*source += 16) - 17;
for (max=16; max && !count[max]; max--);
huff = (ushort *) calloc (1 + (1 << max), sizeof *huff);
merror (huff, "make_decoder()");
huff[0] = max;
for (h=len=1; len <= max; len++)
for (i=0; i < count[len]; i++, ++*source)
for (j=0; j < 1 << (max-len); j++)
if (h <= 1 << max)
huff[h++] = len << 8 | **source;
return huff;
}
ushort * CLASS make_decoder (const uchar *source)
{
return make_decoder_ref (&source);
}
void CLASS crw_init_tables (unsigned table, ushort *huff[2])
{
static const uchar first_tree[3][29] = {
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
{ 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0,
0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff },
{ 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0,
0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff },
};
static const uchar second_tree[3][180] = {
{ 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139,
0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08,
0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0,
0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42,
0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57,
0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9,
0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98,
0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6,
0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4,
0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7,
0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1,
0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64,
0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba,
0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4,
0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff },
{ 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140,
0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06,
0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32,
0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51,
0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26,
0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59,
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9,
0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99,
0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85,
0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8,
0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a,
0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9,
0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8,
0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8,
0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff },
{ 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117,
0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08,
0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22,
0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34,
0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41,
0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48,
0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69,
0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8,
0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94,
0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a,
0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6,
0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62,
0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5,
0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3,
0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff }
};
if (table > 2) table = 2;
huff[0] = make_decoder ( first_tree[table]);
huff[1] = make_decoder (second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
int CLASS canon_has_lowbits()
{
uchar test[0x4000];
int ret=1, i;
fseek (ifp, 0, SEEK_SET);
fread (test, 1, sizeof test, ifp);
for (i=540; i < sizeof test - 1; i++)
if (test[i] == 0xff) {
if (test[i+1]) return 1;
ret=0;
}
return ret;
}
void CLASS canon_load_raw()
{
ushort *pixel, *prow, *huff[2];
int nblocks, lowbits, i, c, row, r, save, val;
int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2];
crw_init_tables (tiff_compress, huff);
lowbits = canon_has_lowbits();
if (!lowbits) maximum = 0x3ff;
fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET);
zero_after_ff = 1;
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row+=8) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
nblocks = MIN (8, raw_height-row) * raw_width >> 6;
for (block=0; block < nblocks; block++) {
memset (diffbuf, 0, sizeof diffbuf);
for (i=0; i < 64; i++ ) {
leaf = gethuff(huff[i > 0]);
if (leaf == 0 && i) break;
if (leaf == 0xff) continue;
i += leaf >> 4;
len = leaf & 15;
if (len == 0) continue;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if (i < 64) diffbuf[i] = diff;
}
diffbuf[0] += carry;
carry = diffbuf[0];
for (i=0; i < 64; i++ ) {
if (pnum++ % raw_width == 0)
base[0] = base[1] = 512;
if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
derror();
}
}
if (lowbits) {
save = ftell(ifp);
fseek (ifp, 26 + row*raw_width/4, SEEK_SET);
for (prow=pixel, i=0; i < raw_width*2; i++) {
c = fgetc(ifp);
for (r=0; r < 8; r+=2, prow++) {
val = (*prow << 2) + ((c >> r) & 3);
if (raw_width == 2672 && val < 512) val += 2;
*prow = val;
}
}
fseek (ifp, save, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
FORC(2) free (huff[c]);
throw;
}
#endif
FORC(2) free (huff[c]);
}
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
int c, tag, len;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
fread (data, 2, 1, ifp);
if (data[1] != 0xd8) return 0;
do {
fread (data, 2, 2, ifp);
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3:
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc0:
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4:
if (info_only) break;
for (dp = data; dp < data+len && (c = *dp++) < 4; )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda:
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (info_only) return 1;
if (jh->clrs > 6 || !jh->huff[0]) return 0;
FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
void CLASS ljpeg_end (struct jhead *jh)
{
int c;
FORC4 if (jh->free[c]) free (jh->free[c]);
free (jh->row);
}
int CLASS ljpeg_diff (ushort *huff)
{
int len, diff;
if(!huff)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
len = gethuff(huff);
if (len == 16 && (!dng_version || dng_version >= 0x1010000))
return -32768;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
return diff;
}
ushort * CLASS ljpeg_row (int jrow, struct jhead *jh)
{
int col, c, diff, pred, spred=0;
ushort mark=0, *row[3];
if (jrow * jh->wide % jh->restart == 0) {
FORC(6) jh->vpred[c] = 1 << (jh->bits-1);
if (jrow) {
fseek (ifp, -2, SEEK_CUR);
do mark = (mark << 8) + (c = fgetc(ifp));
while (c != EOF && mark >> 4 != 0xffd);
}
getbits(-1);
}
FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1);
for (col=0; col < jh->wide; col++)
FORC(jh->clrs) {
diff = ljpeg_diff (jh->huff[c]);
if (jh->sraw && c <= jh->sraw && (col | c))
pred = spred;
else if (col) pred = row[0][-jh->clrs];
else pred = (jh->vpred[c] += diff) - diff;
if (jrow && col) switch (jh->psv) {
case 1: break;
case 2: pred = row[1][0]; break;
case 3: pred = row[1][-jh->clrs]; break;
case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break;
case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break;
case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break;
case 7: pred = (pred + row[1][0]) >> 1; break;
default: pred = 0;
}
if ((**row = pred + diff) >> jh->bits) derror();
if (c <= jh->sraw) spred = **row;
row[0]++; row[1]++;
}
return row[2];
}
void CLASS lossless_jpeg_load_raw()
{
int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0;
struct jhead jh;
ushort *rp;
if (!ljpeg_start (&jh, 0)) return;
if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
jwide = jh.wide * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
if (load_flags & 1)
row = jrow & 1 ? height-1-jrow/2 : jrow/2;
for (jcol=0; jcol < jwide; jcol++) {
val = curve[*rp++];
if (cr2_slice[0]) {
jidx = jrow*jwide + jcol;
i = jidx / (cr2_slice[1]*jh.high);
if ((j = i >= cr2_slice[0]))
i = cr2_slice[0];
jidx -= i * (cr2_slice[1]*jh.high);
row = jidx / cr2_slice[1+j];
col = jidx % cr2_slice[1+j] + i*cr2_slice[1];
}
if (raw_width == 3984 && (col -= 2) < 0)
col += (row--,raw_width);
if(row>raw_height)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 3);
#endif
if ((unsigned) row < raw_height) RAW(row,col) = val;
if (++col >= raw_width)
col = (row++,0);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw;
}
#endif
ljpeg_end (&jh);
}
void CLASS canon_sraw_load_raw()
{
struct jhead jh;
short *rp=0, (*ip)[4];
int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c;
int v[3]={0,0,0}, ver, hue;
char *cp;
if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return;
jwide = (jh.wide >>= 1) * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (ecol=slice=0; slice <= cr2_slice[0]; slice++) {
scol = ecol;
ecol += cr2_slice[1] * 2 / jh.clrs;
if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2;
for (row=0; row < height; row += (jh.clrs >> 1) - 1) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
ip = (short (*)[4]) image + row*width;
for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) {
if ((jcol %= jwide) == 0)
rp = (short *) ljpeg_row (jrow++, &jh);
if (col >= width) continue;
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sraw_ycc>=2)
{
FORC (jh.clrs-2)
{
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192;
}
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else if(imgdata.params.sraw_ycc)
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else
#endif
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 16384;
ip[col][2] = rp[jcol+jh.clrs-1] - 16384;
}
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sraw_ycc>=2)
{
ljpeg_end (&jh);
maximum = 0x3fff;
return;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (cp=model2; *cp && !isdigit(*cp); cp++);
sscanf (cp, "%d.%d.%d", v, v+1, v+2);
ver = (v[0]*1000 + v[1])*1000 + v[2];
hue = (jh.sraw+1) << 2;
if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
hue = jh.sraw << 1;
ip = (short (*)[4]) image;
rp = ip[0];
for (row=0; row < height; row++, ip+=width) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (row & (jh.sraw >> 1))
for (col=0; col < width; col+=2)
for (c=1; c < 3; c++)
if (row == height-1)
ip[col][c] = ip[col-width][c];
else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1;
for (col=1; col < width; col+=2)
for (c=1; c < 3; c++)
if (col == width-1)
ip[col][c] = ip[col-1][c];
else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(!imgdata.params.sraw_ycc)
#endif
for ( ; rp < ip[0]; rp+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (unique_id == 0x80000218 ||
unique_id == 0x80000250 ||
unique_id == 0x80000261 ||
unique_id == 0x80000281 ||
unique_id == 0x80000287) {
rp[1] = (rp[1] << 2) + hue;
rp[2] = (rp[2] << 2) + hue;
pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14);
pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14);
pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14);
} else {
if (unique_id < 0x80000218) rp[0] -= 512;
pix[0] = rp[0] + rp[2];
pix[2] = rp[0] + rp[1];
pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12);
}
FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
ljpeg_end (&jh);
maximum = 0x3fff;
}
void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp)
{
int c;
if (is_raw == 2 && shot_select) (*rp)++;
if (raw_image) {
if (row < raw_height && col < raw_width)
RAW(row,col) = curve[**rp];
*rp += is_raw;
} else {
if (row < height && col < width)
FORC(tiff_samples)
image[row*width+col][c] = curve[(*rp)[c]];
*rp += tiff_samples;
}
if (is_raw == 2 && shot_select) (*rp)--;
}
void CLASS lossless_dng_load_raw()
{
unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col;
struct jhead jh;
ushort *rp;
while (trow < raw_height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
save = ftell(ifp);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
if (!ljpeg_start (&jh, 0)) break;
jwide = jh.wide;
if (filters) jwide *= jh.clrs;
jwide /= is_raw;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=col=jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
for (jcol=0; jcol < jwide; jcol++) {
adobe_copy_pixel (trow+row, tcol+col, &rp);
if (++col >= tile_width || col >= raw_width)
row += 1 + (col = 0);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
fseek (ifp, save+4, SEEK_SET);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
ljpeg_end (&jh);
}
}
void CLASS packed_dng_load_raw()
{
ushort *pixel, *rp;
int row, col;
pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel);
merror (pixel, "packed_dng_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (tiff_bps == 16)
read_shorts (pixel, raw_width * tiff_samples);
else {
getbits(-1);
for (col=0; col < raw_width * tiff_samples; col++)
pixel[col] = getbits(tiff_bps);
}
for (rp=pixel, col=0; col < raw_width; col++)
adobe_copy_pixel (row, col, &rp);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (pixel);
throw ;
}
#endif
free (pixel);
}
void CLASS pentax_load_raw()
{
ushort bit[2][15], huff[4097];
int dep, row, col, diff, c, i;
ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
dep = (get2() + 12) & 15;
fseek (ifp, 12, SEEK_CUR);
FORC(dep) bit[0][c] = get2();
FORC(dep) bit[1][c] = fgetc(ifp);
FORC(dep)
for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); )
huff[++i] = bit[1][c] << 8 | c;
huff[0] = 12;
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
}
void CLASS nikon_load_raw()
{
static const uchar nikon_tree[][32] = {
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */
5,4,3,6,2,7,1,0,8,9,11,10,12 },
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */
0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */
5,4,6,3,7,2,8,1,9,0,10,11,12 },
{ 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */
5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
{ 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */
8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
{ 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */
7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff;
fseek (ifp, meta_offset, SEEK_SET);
ver0 = fgetc(ifp);
ver1 = fgetc(ifp);
if (ver0 == 0x49 || ver1 == 0x58)
fseek (ifp, 2110, SEEK_CUR);
if (ver0 == 0x46) tree = 2;
if (tiff_bps == 14) tree += 3;
read_shorts (vpred[0], 4);
max = 1 << tiff_bps & 0x7fff;
if ((csize = get2()) > 1)
step = max / (csize-1);
if (ver0 == 0x44 && ver1 == 0x20 && step > 0) {
for (i=0; i < csize; i++)
curve[i*step] = get2();
for (i=0; i < max; i++)
curve[i] = ( curve[i-i%step]*(step-i%step) +
curve[i-i%step+step]*(i%step) ) / step;
fseek (ifp, meta_offset+562, SEEK_SET);
split = get2();
} else if (ver0 != 0x46 && csize <= 0x4001)
read_shorts (curve, max=csize);
while (curve[max-2] == curve[max-1]) max--;
huff = make_decoder (nikon_tree[tree]);
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (min=row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (split && row == split) {
free (huff);
huff = make_decoder (nikon_tree[tree+1]);
max += (min = 16) << 1;
}
for (col=0; col < raw_width; col++) {
i = gethuff(huff);
len = i & 15;
shl = i >> 4;
diff = ((getbits(len-shl) << 1) + 1) << shl >> 1;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - !shl;
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if ((ushort)(hpred[col & 1] + min) >= max) derror();
RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (huff);
throw;
}
#endif
free (huff);
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
int CLASS nikon_e995()
{
int i, histo[256];
const uchar often[] = { 0x00, 0x55, 0xaa, 0xff };
memset (histo, 0, sizeof histo);
fseek (ifp, -2000, SEEK_END);
for (i=0; i < 2000; i++)
histo[fgetc(ifp)]++;
for (i=0; i < 4; i++)
if (histo[often[i]] < 200)
return 0;
return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
int CLASS nikon_e2100()
{
uchar t[12];
int i;
fseek (ifp, 0, SEEK_SET);
for (i=0; i < 1024; i++) {
fread (t, 1, 12, ifp);
if (((t[2] & t[4] & t[7] & t[9]) >> 4
& t[1] & t[6] & t[8] & t[11] & 3) != 3)
return 0;
}
return 1;
}
void CLASS nikon_3700()
{
int bits, i;
uchar dp[24];
static const struct {
int bits;
char t_make[12], t_model[15];
} table[] = {
{ 0x00, "Pentax", "Optio 33WR" },
{ 0x03, "Nikon", "E3200" },
{ 0x32, "Nikon", "E3700" },
{ 0x33, "Olympus", "C740UZ" } };
fseek (ifp, 3072, SEEK_SET);
fread (dp, 1, 24, ifp);
bits = (dp[8] & 3) << 4 | (dp[20] & 3);
for (i=0; i < sizeof table / sizeof *table; i++)
if (bits == table[i].bits) {
strcpy (make, table[i].t_make );
strcpy (model, table[i].t_model);
}
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
int CLASS minolta_z2()
{
int i, nz;
char tail[424];
fseek (ifp, -sizeof tail, SEEK_END);
fread (tail, 1, sizeof tail, ifp);
for (nz=i=0; i < sizeof tail; i++)
if (tail[i]) nz++;
return nz > 20;
}
void CLASS ppm_thumb()
{
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) malloc (thumb_length);
merror (thumb, "ppm_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fread (thumb, 1, thumb_length, ifp);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS ppm16_thumb()
{
int i;
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) calloc (thumb_length, 2);
merror (thumb, "ppm16_thumb()");
read_shorts ((ushort *) thumb, thumb_length);
for (i=0; i < thumb_length; i++)
thumb[i] = ((ushort *) thumb)[i] >> 8;
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS layer_thumb()
{
int i, c;
char *thumb, map[][4] = { "012","102" };
colors = thumb_misc >> 5 & 7;
thumb_length = thumb_width*thumb_height;
thumb = (char *) calloc (colors, thumb_length);
merror (thumb, "layer_thumb()");
fprintf (ofp, "P%d\n%d %d\n255\n",
5 + (colors >> 1), thumb_width, thumb_height);
fread (thumb, thumb_length, colors, ifp);
for (i=0; i < thumb_length; i++)
FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
free (thumb);
}
void CLASS rollei_thumb()
{
unsigned i;
ushort *thumb;
thumb_length = thumb_width * thumb_height;
thumb = (ushort *) calloc (thumb_length, 2);
merror (thumb, "rollei_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
read_shorts (thumb, thumb_length);
for (i=0; i < thumb_length; i++) {
putc (thumb[i] << 3, ofp);
putc (thumb[i] >> 5 << 2, ofp);
putc (thumb[i] >> 11 << 3, ofp);
}
free (thumb);
}
void CLASS rollei_load_raw()
{
uchar pixel[10];
unsigned iten=0, isix, i, buffer=0, todo[16];
isix = raw_width * raw_height * 5 / 8;
while (fread (pixel, 1, 10, ifp) == 10) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (i=0; i < 10; i+=2) {
todo[i] = iten++;
todo[i+1] = pixel[i] << 8 | pixel[i+1];
buffer = pixel[i] >> 2 | buffer << 6;
}
for ( ; i < 16; i+=2) {
todo[i] = isix++;
todo[i+1] = buffer >> (14-i)*5;
}
for (i=0; i < 16; i+=2)
raw_image[todo[i]] = (todo[i+1] & 0x3ff);
}
maximum = 0x3ff;
}
int CLASS raw (unsigned row, unsigned col)
{
return (row < raw_height && col < raw_width) ? RAW(row,col) : 0;
}
void CLASS phase_one_flat_field (int is_float, int nc)
{
ushort head[8];
unsigned wide, y, x, c, rend, cend, row, col;
float *mrow, num, mult[4];
read_shorts (head, 8);
wide = head[2] / head[4];
mrow = (float *) calloc (nc*wide, sizeof *mrow);
merror (mrow, "phase_one_flat_field()");
for (y=0; y < head[3] / head[5]; y++) {
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2) {
num = is_float ? getreal(11) : get2()/32768.0;
if (y==0) mrow[c*wide+x] = num;
else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5];
}
if (y==0) continue;
rend = head[1] + y*head[5];
for (row = rend-head[5]; row < raw_height && row < rend; row++) {
for (x=1; x < wide; x++) {
for (c=0; c < nc; c+=2) {
mult[c] = mrow[c*wide+x-1];
mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4];
}
cend = head[0] + x*head[4];
for (col = cend-head[4]; col < raw_width && col < cend; col++) {
c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0;
if (!(c & 1)) {
c = RAW(row,col) * mult[c];
RAW(row,col) = LIM(c,0,65535);
}
for (c=0; c < nc; c+=2)
mult[c] += mult[c+1];
}
}
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2)
mrow[c*wide+x] += mrow[(c+1)*wide+x];
}
}
free (mrow);
}
void CLASS phase_one_correct()
{
unsigned entries, tag, data, save, col, row, type;
int len, i, j, k, cip, val[4], dev[4], sum, max;
int head[9], diff, mindiff=INT_MAX, off_412=0;
static const signed char dir[12][2] =
{ {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0},
{-2,-2}, {-2,2}, {2,-2}, {2,2} };
float poly[8], num, cfrac, frac, mult[2], *yval[2];
ushort *xval[2];
if (half_size || !meta_length) return;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Phase One correction...\n"));
#endif
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
while (entries--) {
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x419) { /* Polynomial curve */
for (get4(), i=0; i < 8; i++)
poly[i] = getreal(11);
poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
for (i=0; i < 0x10000; i++) {
num = (poly[5]*i + poly[3])*i + poly[1];
curve[i] = LIM(num,0,65535);
} goto apply; /* apply to right half */
} else if (tag == 0x41a) { /* Polynomial curve */
for (i=0; i < 4; i++)
poly[i] = getreal(11);
for (i=0; i < 0x10000; i++) {
for (num=0, j=4; j--; )
num = num * i + poly[j];
curve[i] = LIM(num+i,0,65535);
} apply: /* apply to whole image */
for (row=0; row < raw_height; row++)
for (col = (tag & 1)*ph1.split_col; col < raw_width; col++)
RAW(row,col) = curve[RAW(row,col)];
} else if (tag == 0x400) { /* Sensor defects */
while ((len -= 8) >= 0) {
col = get2();
row = get2();
type = get2(); get2();
if (col >= raw_width) continue;
if (type == 131) /* Bad column */
for (row=0; row < raw_height; row++)
if (FC(row-top_margin,col-left_margin) == 1) {
for (sum=i=0; i < 4; i++)
sum += val[i] = raw (row+dir[i][0], col+dir[i][1]);
for (max=i=0; i < 4; i++) {
dev[i] = abs((val[i] << 2) - sum);
if (dev[max] < dev[i]) max = i;
}
RAW(row,col) = (sum - val[max])/3.0 + 0.5;
} else {
for (sum=0, i=8; i < 12; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = 0.5 + sum * 0.0732233 +
(raw(row,col-2) + raw(row,col+2)) * 0.3535534;
}
else if (type == 129) { /* Bad pixel */
if (row >= raw_height) continue;
j = (FC(row-top_margin,col-left_margin) != 1) * 4;
for (sum=0, i=j; i < j+8; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = (sum + 4) >> 3;
}
}
} else if (tag == 0x401) { /* All-color flat fields */
phase_one_flat_field (1, 2);
} else if (tag == 0x416 || tag == 0x410) {
phase_one_flat_field (0, 2);
} else if (tag == 0x40b) { /* Red+blue flat field */
phase_one_flat_field (0, 4);
} else if (tag == 0x412) {
fseek (ifp, 36, SEEK_CUR);
diff = abs (get2() - ph1.tag_21a);
if (mindiff > diff) {
mindiff = diff;
off_412 = ftell(ifp) - 38;
}
}
fseek (ifp, save, SEEK_SET);
}
if (off_412) {
fseek (ifp, off_412, SEEK_SET);
for (i=0; i < 9; i++) head[i] = get4() & 0x7fff;
yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6);
merror (yval[0], "phase_one_correct()");
yval[1] = (float *) (yval[0] + head[1]*head[3]);
xval[0] = (ushort *) (yval[1] + head[2]*head[4]);
xval[1] = (ushort *) (xval[0] + head[1]*head[3]);
get2();
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
yval[i][j] = getreal(11);
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
xval[i][j] = get2();
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
cfrac = (float) col * head[3] / raw_width;
cfrac -= cip = cfrac;
num = RAW(row,col) * 0.5;
for (i=cip; i < cip+2; i++) {
for (k=j=0; j < head[1]; j++)
if (num < xval[0][k = head[1]*i+j]) break;
frac = (j == 0 || j == head[1]) ? 0 :
(xval[0][k] - num) / (xval[0][k] - xval[0][k-1]);
mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac);
}
i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2;
RAW(row,col) = LIM(i,0,65535);
}
free (yval[0]);
}
}
void CLASS phase_one_load_raw()
{
int a, b, i;
ushort akey, bkey, t_mask;
fseek (ifp, ph1.key_off, SEEK_SET);
akey = get2();
bkey = get2();
t_mask = ph1.format == 1 ? 0x5555:0x1354;
fseek (ifp, data_offset, SEEK_SET);
read_shorts (raw_image, raw_width*raw_height);
if (ph1.format)
for (i=0; i < raw_width*raw_height; i+=2) {
a = raw_image[i+0] ^ akey;
b = raw_image[i+1] ^ bkey;
raw_image[i+0] = (a & t_mask) | (b & ~t_mask);
raw_image[i+1] = (b & t_mask) | (a & ~t_mask);
}
}
unsigned CLASS ph1_bithuff (int nbits, ushort *huff)
{
#ifndef LIBRAW_NOTHREADS
#define bitbuf tls->ph1_bits.bitbuf
#define vbits tls->ph1_bits.vbits
#else
static UINT64 bitbuf=0;
static int vbits=0;
#endif
unsigned c;
if (nbits == -1)
return bitbuf = vbits = 0;
if (nbits == 0) return 0;
if (vbits < nbits) {
bitbuf = bitbuf << 32 | get4();
vbits += 32;
}
c = bitbuf << (64-vbits) >> (64-nbits);
if (huff) {
vbits -= huff[c] >> 8;
return (uchar) huff[c];
}
vbits -= nbits;
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#endif
}
#define ph1_bits(n) ph1_bithuff(n,0)
#define ph1_huff(h) ph1_bithuff(*h,h+1)
void CLASS phase_one_load_raw_c()
{
static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
int *offset, len[2], pred[2], row, col, i, j;
ushort *pixel;
short (*t_black)[2];
pixel = (ushort *) calloc (raw_width + raw_height*4, 2);
merror (pixel, "phase_one_load_raw_c()");
offset = (int *) (pixel + raw_width);
fseek (ifp, strip_offset, SEEK_SET);
for (row=0; row < raw_height; row++)
offset[row] = get4();
t_black = (short (*)[2]) offset + raw_height;
fseek (ifp, ph1.black_off, SEEK_SET);
if (ph1.black_off)
{
read_shorts ((ushort *) t_black[0], raw_height*2);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.rawdata.ph1_black = (short (*)[2])calloc(raw_height*2,sizeof(short));
merror (imgdata.rawdata.ph1_black, "phase_one_load_raw_c()");
memmove(imgdata.rawdata.ph1_black,(short *) t_black[0],raw_height*2*sizeof(short));
#endif
}
for (i=0; i < 256; i++)
curve[i] = i*i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, data_offset + offset[row], SEEK_SET);
ph1_bits(-1);
pred[0] = pred[1] = 0;
for (col=0; col < raw_width; col++) {
if (col >= (raw_width & -8))
len[0] = len[1] = 14;
else if ((col & 7) == 0)
for (i=0; i < 2; i++) {
for (j=0; j < 5 && !ph1_bits(1); j++);
if (j--) len[i] = length[j*2 + ph1_bits(1)];
}
if ((i = len[col & 1]) == 14)
pixel[col] = pred[col & 1] = ph1_bits(16);
else
pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
if (pred[col & 1] >> 16) derror();
if (ph1.format == 5 && pixel[col] < 256)
pixel[col] = curve[pixel[col]];
}
for (col=0; col < raw_width; col++) {
#ifndef LIBRAW_LIBRARY_BUILD
i = (pixel[col] << 2) - ph1.t_black + t_black[row][col >= ph1.split_col];
if (i > 0) RAW(row,col) = i;
#else
RAW(row,col) = pixel[col] << 2;
#endif
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = 0xfffc - ph1.t_black;
}
void CLASS hasselblad_load_raw()
{
struct jhead jh;
int row, col, pred[2], len[2], diff, c;
if (!ljpeg_start (&jh, 0)) return;
order = 0x4949;
ph1_bits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pred[0] = pred[1] = 0x8000 + load_flags;
for (col=0; col < raw_width; col+=2) {
FORC(2) len[c] = ph1_huff(jh.huff[0]);
FORC(2) {
diff = ph1_bits(len[c]);
if ((diff & (1 << (len[c]-1))) == 0)
diff -= (1 << len[c]) - 1;
if (diff == 65535) diff = -32768;
RAW(row,col+c) = pred[c] += diff;
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
ljpeg_end (&jh);
throw;
}
#endif
ljpeg_end (&jh);
maximum = 0xffff;
}
void CLASS leaf_hdr_load_raw()
{
ushort *pixel=0;
unsigned tile=0, r, c, row, col;
if (!filters) {
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "leaf_hdr_load_raw()");
}
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
FORC(tiff_samples)
for (r=0; r < raw_height; r++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (r % tile_length == 0) {
fseek (ifp, data_offset + 4*tile++, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
}
if (filters && c != shot_select) continue;
if (filters) pixel = raw_image + r*raw_width;
read_shorts (pixel, raw_width);
if (!filters && (row = r - top_margin) < height)
for (col=0; col < width; col++)
image[row*width+col][c] = pixel[col+left_margin];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
if(!filters) free(pixel);
throw;
}
#endif
if (!filters) {
maximum = 0xffff;
raw_color = 1;
free (pixel);
}
}
void CLASS unpacked_load_raw()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
read_shorts (raw_image, raw_width*raw_height);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
}
void CLASS sinar_4shot_load_raw()
{
ushort *pixel;
unsigned shot, row, col, r, c;
if ((shot = shot_select) || half_size) {
if (shot) shot--;
if (shot > 3) shot = 3;
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
unpacked_load_raw();
return;
}
#ifndef LIBRAW_LIBRARY_BUILD
free (raw_image);
raw_image = 0;
free (image);
image = (ushort (*)[4])
calloc ((iheight=height), (iwidth=width)*sizeof *image);
merror (image, "sinar_4shot_load_raw()");
#endif
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (shot=0; shot < 4; shot++) {
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
read_shorts (pixel, raw_width);
if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue;
for (col=0; col < raw_width; col++) {
if ((c = col-left_margin - (shot & 1)) >= width) continue;
image[r*width+c][FC(row,col)] = pixel[col];
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
shrink = filters = 0;
}
void CLASS imacon_full_load_raw()
{
int row, col;
if (!image) return;
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
read_shorts (image[row*width+col], 3);
}
}
void CLASS packed_load_raw()
{
int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i;
UINT64 bitbuf=0;
bwide = raw_width * tiff_bps / 8;
bwide += bwide & load_flags >> 7;
rbits = bwide * 8 - raw_width * tiff_bps;
if (load_flags & 1) bwide = bwide * 16 / 15;
bite = 8 + (load_flags & 24);
half = (raw_height+1) >> 1;
for (irow=0; irow < raw_height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
row = irow;
if (load_flags & 2 &&
(row = irow % half * 2 + irow / half) == 1 &&
load_flags & 4) {
if (vbits=0, tiff_compress)
fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET);
else {
fseek (ifp, 0, SEEK_END);
fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
}
}
for (col=0; col < raw_width; col++) {
for (vbits -= tiff_bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps);
RAW(row,col ^ (load_flags >> 6 & 1)) = val;
if (load_flags & 1 && (col % 10) == 9 &&
fgetc(ifp) && col < width+left_margin) derror();
}
vbits -= rbits;
}
}
void CLASS nokia_load_raw()
{
uchar *data, *dp;
int rev, dwide, row, col, c;
rev = 3 * (order == 0x4949);
dwide = (raw_width * 5 + 1) / 4;
data = (uchar *) malloc (dwide*2);
merror (data, "nokia_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data+dwide, 1, dwide, ifp) < dwide) derror();
FORC(dwide) data[c] = data[dwide+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
free (data);
throw;
}
#endif
free (data);
maximum = 0x3ff;
}
void CLASS canon_rmf_load_raw()
{
int row, col, bits, orow, ocol, c;
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width-2; col+=3) {
bits = get4();
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0) {
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = bits >> (10*c+2) & 0x3ff;
}
}
}
maximum = 0x3ff;
}
unsigned CLASS pana_bits (int nbits)
{
#ifndef LIBRAW_NOTHREADS
#define buf tls->pana_bits.buf
#define vbits tls->pana_bits.vbits
#else
static uchar buf[0x4000];
static int vbits;
#endif
int byte;
if (!nbits) return vbits=0;
if (!vbits) {
fread (buf+load_flags, 1, 0x4000-load_flags, ifp);
fread (buf, 1, load_flags, ifp);
}
vbits = (vbits - nbits) & 0x1ffff;
byte = vbits >> 3 ^ 0x3ff0;
return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits);
#ifndef LIBRAW_NOTHREADS
#undef buf
#undef vbits
#endif
}
void CLASS panasonic_load_raw()
{
int row, col, i, j, sh=0, pred[2], nonz[2];
pana_bits(0);
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
if ((i = col % 14) == 0)
pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2));
if (nonz[i & 1]) {
if ((j = pana_bits(8))) {
if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
pred[i & 1] &= ~((~0u) << sh);
pred[i & 1] += j << sh;
}
} else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
if ((RAW(row,col) = pred[col & 1]) > 4098 && col < width) derror();
}
}
}
void CLASS olympus_load_raw()
{
ushort huff[4096];
int row, col, nbits, sign, low, high, i, c, w, n, nw;
int acarry[2][3], *carry, pred, diff;
huff[n=0] = 0xc0c;
for (i=12; i--; )
FORC(2048 >> i) huff[++n] = (i+1) << 8 | i;
fseek (ifp, 7, SEEK_CUR);
getbits(-1);
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
memset (acarry, 0, sizeof acarry);
for (col=0; col < raw_width; col++) {
carry = acarry[col & 1];
i = 2 * (carry[2] < 3);
for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++);
low = (sign = getbits(3)) & 3;
sign = sign << 29 >> 31;
if ((high = getbithuff(12,huff)) == 12)
high = getbits(16-nbits) >> 1;
carry[0] = (high << nbits) | getbits(nbits);
diff = (carry[0] ^ sign) + carry[1];
carry[1] = (diff*3 + carry[1]) >> 5;
carry[2] = carry[0] > 16 ? 0 : carry[2]+1;
if (col >= width) continue;
if (row < 2 && col < 2) pred = 0;
else if (row < 2) pred = RAW(row,col-2);
else if (col < 2) pred = RAW(row-2,col);
else {
w = RAW(row,col-2);
n = RAW(row-2,col);
nw = RAW(row-2,col-2);
if ((w < nw && nw < n) || (n < nw && nw < w)) {
if (ABS(w-nw) > 32 || ABS(n-nw) > 32)
pred = w + n - nw;
else pred = (w + n) >> 1;
} else pred = ABS(w-nw) > ABS(n-nw) ? w : n;
}
if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror();
}
}
}
void CLASS minolta_rd175_load_raw()
{
uchar pixel[768];
unsigned irow, box, row, col;
for (irow=0; irow < 1481; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 768, ifp) < 768) derror();
box = irow / 82;
row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2);
switch (irow) {
case 1477: case 1479: continue;
case 1476: row = 984; break;
case 1480: row = 985; break;
case 1478: row = 985; box = 1;
}
if ((box < 12) && (box & 1)) {
for (col=0; col < 1533; col++, row ^= 1)
if (col != 1) RAW(row,col) = (col+1) & 2 ?
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1;
RAW(row,1) = pixel[1] << 1;
RAW(row,1533) = pixel[765] << 1;
} else
for (col=row & 1; col < 1534; col+=2)
RAW(row,col) = pixel[col/2] << 1;
}
maximum = 0xff << 1;
}
void CLASS quicktake_100_load_raw()
{
uchar pixel[484][644];
static const short gstep[16] =
{ -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 };
static const short rstep[6][4] =
{ { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 },
{ -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } };
static const short t_curve[256] =
{ 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,
54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,
79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116,
118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155,
158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195,
197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244,
248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322,
326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400,
405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479,
483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643,
654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844,
855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 };
int rb, row, col, sharp, val=0;
getbits(-1);
memset (pixel, 0x80, sizeof pixel);
for (row=2; row < height+2; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=2+(row & 1); col < width+2; col+=2) {
val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] +
pixel[row][col-2]) >> 2) + gstep[getbits(4)];
pixel[row][col] = val = LIM(val,0,255);
if (col < 4)
pixel[row][col-2] = pixel[row+1][~row & 1] = val;
if (row == 2)
pixel[row-1][col+1] = pixel[row-1][col+3] = val;
}
pixel[row][col] = val;
}
for (rb=0; rb < 2; rb++)
for (row=2+rb; row < height+2; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
if (row < 4 || col < 4) sharp = 2;
else {
val = ABS(pixel[row-2][col] - pixel[row][col-2])
+ ABS(pixel[row-2][col] - pixel[row-2][col-2])
+ ABS(pixel[row][col-2] - pixel[row-2][col-2]);
sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 :
val < 32 ? 3 : val < 48 ? 4 : 5;
}
val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1)
+ rstep[sharp][getbits(2)];
pixel[row][col] = val = LIM(val,0,255);
if (row < 4) pixel[row-2][col+2] = val;
if (col < 4) pixel[row+2][col-2] = val;
}
}
for (row=2; row < height+2; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
val = ((pixel[row][col-1] + (pixel[row][col] << 2) +
pixel[row][col+1]) >> 1) - 0x100;
pixel[row][col] = LIM(val,0,255);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = t_curve[pixel[row+2][col+2]];
}
maximum = 0x3ff;
}
#define radc_token(tree) ((signed char) getbithuff(8,huff[tree]))
#define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--)
#define PREDICTOR (c ? (buf[c][y-1][x] + buf[c][y][x+1]) / 2 \
: (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4)
#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
# pragma GCC optimize("no-aggressive-loop-optimizations")
# endif
#endif
void CLASS kodak_radc_load_raw()
{
static const char src[] = {
1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
1,0, 2,2, 2,-2,
1,-3, 1,3,
2,-17, 2,-5, 2,5, 2,17,
2,-7, 2,2, 2,9, 2,18,
2,-18, 2,-9, 2,-2, 2,7,
2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
static const ushort pt[] =
{ 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
for (i=2; i < 12; i+=2)
for (c=pt[i-2]; c <= pt[i]; c++)
curve[c] = (float)
(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
for (s=i=0; i < sizeof src; i+=2)
FORC(256 >> src[i])
huff[0][s++] = src[i] << 8 | (uchar) src[i+1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
getbits(-1);
for (i=0; i < sizeof(buf)/sizeof(short); i++)
buf[0][0][i] = 2048;
for (row=0; row < height; row+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC3 mul[c] = getbits(6);
FORC3 {
val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10:12;
x = ~((~0u) << (s-1));
val <<= 12-s;
for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
buf[c][0][i] = (buf[c][0][i] * val + x) >> s;
last[c] = mul[c];
for (r=0; r <= !c; r++) {
buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
for (tree=1, col=width/2; col > 0; ) {
if ((tree = radc_token(tree))) {
col -= 2;
if (tree == 8)
FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
} else
do {
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
col -= 2;
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1) {
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y=0; y < 2; y++)
for (x=0; x < width/2; x++) {
val = (buf[c][y+1][x] << 4) / mul[c];
if (val < 0) val = 0;
if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
else RAW(row+r*2+y,x*2+y) = val;
}
memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
}
}
for (y=row; y < row+4; y++)
for (x=0; x < width; x++)
if ((x+y) & 1) {
r = x ? x-1 : x+1;
s = x+1 < width ? x+1 : x-1;
val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
if (val < 0) val = 0;
RAW(y,x) = val;
}
}
for (i=0; i < height*width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS kodak_jpeg_load_raw() {}
#else
METHODDEF(boolean)
fill_input_buffer (j_decompress_ptr cinfo)
{
#ifndef LIBRAW_NOTHREADS
#define jpeg_buffer tls->jpeg_buffer
#else
static uchar jpeg_buffer[4096];
#endif
size_t nbytes;
nbytes = fread (jpeg_buffer, 1, 4096, ifp);
swab (jpeg_buffer, jpeg_buffer, nbytes);
cinfo->src->next_input_byte = jpeg_buffer;
cinfo->src->bytes_in_buffer = nbytes;
return TRUE;
#ifndef LIBRAW_NOTHREADS
#undef jpeg_buffer
#endif
}
void CLASS kodak_jpeg_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
int row, col;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
jpeg_stdio_src (&cinfo, ifp);
cinfo.src->fill_input_buffer = fill_input_buffer;
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 )) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname);
#endif
jpeg_destroy_decompress (&cinfo);
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_DECODE_JPEG;
#else
longjmp (failure, 3);
#endif
}
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (cinfo.output_scanline < cinfo.output_height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
throw;
}
#endif
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
maximum = 0xff << 1;
}
#endif
void CLASS lossy_dng_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
unsigned sorder=order, ntags, opcode, deg, i, j, c;
unsigned save=data_offset-4, trow=0, tcol=0, row, col;
ushort t_curve[3][256];
double coeff[9], tot;
fseek (ifp, meta_offset, SEEK_SET);
order = 0x4d4d;
ntags = get4();
while (ntags--) {
opcode = get4(); get4(); get4();
if (opcode != 8)
{ fseek (ifp, get4(), SEEK_CUR); continue; }
fseek (ifp, 20, SEEK_CUR);
if ((c = get4()) > 2) break;
fseek (ifp, 12, SEEK_CUR);
if ((deg = get4()) > 8) break;
for (i=0; i <= deg && i < 9; i++)
coeff[i] = getreal(12);
for (i=0; i < 256; i++) {
for (tot=j=0; j <= deg; j++)
tot += coeff[j] * pow(i/255.0, (int)j);
t_curve[c][i] = tot*0xffff;
}
}
order = sorder;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
while (trow < raw_height) {
fseek (ifp, save+=4, SEEK_SET);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
{
jpeg_destroy_decompress(&cinfo);
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
#else
jpeg_stdio_src (&cinfo, ifp);
#endif
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (cinfo.output_scanline < cinfo.output_height &&
(row = trow + cinfo.output_scanline) < height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < cinfo.output_width && tcol+col < width; col++) {
FORC3 image[row*width+tcol+col][c] = t_curve[c][pixel[col][c]];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
jpeg_destroy_decompress (&cinfo);
throw;
}
#endif
jpeg_abort_decompress (&cinfo);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
}
jpeg_destroy_decompress (&cinfo);
maximum = 0xffff;
}
#endif
void CLASS kodak_dc120_load_raw()
{
static const int mul[4] = { 162, 192, 187, 92 };
static const int add[4] = { 0, 636, 424, 212 };
uchar pixel[848];
int row, shift, col;
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 848, ifp) < 848) derror();
shift = row * mul[row & 3] + add[row & 3];
for (col=0; col < width; col++)
RAW(row,col) = (ushort) pixel[(col + shift) % 848];
}
maximum = 0xff;
}
void CLASS eight_bit_load_raw()
{
uchar *pixel;
unsigned row, col;
pixel = (uchar *) calloc (raw_width, sizeof *pixel);
merror (pixel, "eight_bit_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, raw_width, ifp) < raw_width) derror();
for (col=0; col < raw_width; col++)
RAW(row,col) = curve[pixel[col]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_yrgb_load_raw()
{
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel);
merror (pixel, "kodak_yrgb_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (~row & 1)
if (fread (pixel, raw_width, 3, ifp) < 3) derror();
for (col=0; col < raw_width; col++) {
y = pixel[width*2*(row & 1) + col];
cb = pixel[width + (col & -2)] - 128;
cr = pixel[width + (col & -2)+1] - 128;
rgb[1] = y-((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_262_load_raw()
{
static const uchar kodak_tree[2][26] =
{ { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 },
{ 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } };
ushort *huff[2];
uchar *pixel;
int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val;
FORC(2) huff[c] = make_decoder (kodak_tree[c]);
ns = (raw_height+63) >> 5;
pixel = (uchar *) malloc (raw_width*32 + ns*4);
merror (pixel, "kodak_262_load_raw()");
strip = (int *) (pixel + raw_width*32);
order = 0x4d4d;
FORC(ns) strip[c] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if ((row & 31) == 0) {
fseek (ifp, strip[row >> 5], SEEK_SET);
getbits(-1);
pi = 0;
}
for (col=0; col < raw_width; col++) {
chess = (row + col) & 1;
pi1 = chess ? pi-2 : pi-raw_width-1;
pi2 = chess ? pi-2*raw_width : pi-raw_width+1;
if (col <= chess) pi1 = -1;
if (pi1 < 0) pi1 = pi2;
if (pi2 < 0) pi2 = pi1;
if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2;
pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
pixel[pi] = val = pred + ljpeg_diff (huff[chess]);
if (val >> 8) derror();
val = curve[pixel[pi++]];
RAW(row,col) = val;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
FORC(2) free (huff[c]);
}
int CLASS kodak_65000_decode (short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf=0;
int save, bits=0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i=0; i < bsize; i+=2) {
c = fgetc(ifp);
if ((blen[i ] = c & 15) > 12 ||
(blen[i+1] = c >> 4) > 12 ) {
fseek (ifp, save, SEEK_SET);
for (i=0; i < bsize; i+=8) {
read_shorts (raw, 6);
out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j=0; j < 6; j++)
out[i+2+j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4) {
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i=0; i < bsize; i++) {
len = blen[i];
if (bits < len) {
for (j=0; j < 32; j+=8)
bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16-len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
void CLASS kodak_65000_load_raw()
{
short buf[256];
int row, col, len, pred[2], ret, i;
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
pred[0] = pred[1] = 0;
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len);
for (i=0; i < len; i++)
if ((RAW(row,col+i) = curve[ret ? buf[i] :
(pred[i & 1] += buf[i])]) >> 12) derror();
}
}
}
void CLASS kodak_ycbcr_load_raw()
{
short buf[384], *bp;
int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
ushort *ip;
if (!image) return;
for (row=0; row < height; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=128) {
len = MIN (128, width-col);
kodak_65000_decode (buf, len*3);
y[0][1] = y[1][1] = cb = cr = 0;
for (bp=buf, i=0; i < len; i+=2, bp+=2) {
cb += bp[4];
cr += bp[5];
rgb[1] = -((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
for (j=0; j < 2; j++)
for (k=0; k < 2; k++) {
if ((y[j][k] = y[j][k^1] + *bp++) >> 10) derror();
ip = image[(row+j)*width + col+i+k];
FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)];
}
}
}
}
}
void CLASS kodak_rgb_load_raw()
{
short buf[768], *bp;
int row, col, len, c, i, rgb[3];
ushort *ip=image[0];
#ifndef LIBRAW_LIBRARY_BUILD
if (raw_image) free (raw_image);
raw_image = 0;
#endif
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
len = MIN (256, width-col);
kodak_65000_decode (buf, len*3);
memset (rgb, 0, sizeof rgb);
for (bp=buf, i=0; i < len; i++, ip+=4)
FORC3 if ((ip[c] = rgb[c] += *bp++) >> 12) derror();
}
}
}
void CLASS kodak_thumb_load_raw()
{
int row, col;
colors = thumb_misc >> 5;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
read_shorts (image[row*width+col], colors);
maximum = (1 << (thumb_misc & 31)) - 1;
}
void CLASS sony_decrypt (unsigned *data, int len, int start, int key)
{
#ifndef LIBRAW_NOTHREADS
#define pad tls->sony_decrypt.pad
#define p tls->sony_decrypt.p
#else
static unsigned pad[128], p;
#endif
if (start) {
for (p=0; p < 4; p++)
pad[p] = key = key * 48828125 + 1;
pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31;
for (p=4; p < 127; p++)
pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31;
for (p=0; p < 127; p++)
pad[p] = htonl(pad[p]);
}
#if 1 // Avoid gcc 4.8 bug
while (len--)
{
*data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
p++;
}
#else
while (len--)
*data++ ^= pad[p++ & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
#endif
#ifndef LIBRAW_NOTHREADS
#undef pad
#undef p
#endif
}
void CLASS sony_load_raw()
{
uchar head[40];
ushort *pixel;
unsigned i, key, row, col;
fseek (ifp, 200896, SEEK_SET);
fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
order = 0x4d4d;
key = get4();
fseek (ifp, 164600, SEEK_SET);
fread (head, 1, 40, ifp);
sony_decrypt ((unsigned int *) head, 10, 1, key);
for (i=26; i-- > 22; )
key = key << 8 | head[i];
fseek (ifp, data_offset, SEEK_SET);
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key);
for (col=0; col < raw_width; col++)
if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
}
maximum = 0x3ff0;
}
void CLASS sony_arw_load_raw()
{
ushort huff[32768];
static const ushort tab[18] =
{ 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809,
0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 };
int i, c, n, col, row, len, diff, sum=0;
for (n=i=0; i < 18; i++)
FORC(32768 >> (tab[i] >> 8)) huff[n++] = tab[i];
getbits(-1);
for (col = raw_width; col--; )
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (row=0; row < raw_height+1; row+=2) {
if (row == raw_height) row = 1;
len = getbithuff(15,huff);
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if ((sum += diff) >> 12) derror();
if (row < height) RAW(row,col) = sum;
}
}
}
void CLASS sony_arw2_load_raw()
{
uchar *data, *dp;
ushort pix[16];
int row, col, val, max, min, imax, imin, sh, bit, i;
data = (uchar *) malloc (raw_width);
merror (data, "sony_arw2_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fread (data, 1, raw_width, ifp);
for (dp=data, col=0; col < raw_width-30; dp+=16) {
max = 0x7ff & (val = sget4(dp));
min = 0x7ff & val >> 11;
imax = 0x0f & val >> 22;
imin = 0x0f & val >> 26;
for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++);
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sony_arw2_hack)
{
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1];
}
else
{
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
}
#else
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
#endif
col -= col & 1 ? 1:31;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (data);
throw;
}
#endif
free (data);
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sony_arw2_hack)
{
black <<= 2;
maximum <<=2;
}
#endif
}
void CLASS samsung_load_raw()
{
int row, col, c, i, dir, op[4], len[4];
order = 0x4949;
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, strip_offset+row*4, SEEK_SET);
fseek (ifp, data_offset+get4(), SEEK_SET);
ph1_bits(-1);
FORC4 len[c] = row < 2 ? 7:4;
for (col=0; col < raw_width; col+=16) {
dir = ph1_bits(1);
FORC4 op[c] = ph1_bits(2);
FORC4 switch (op[c]) {
case 3: len[c] = ph1_bits(4); break;
case 2: len[c]--; break;
case 1: len[c]++;
}
for (c=0; c < 16; c+=2) {
i = len[((c & 1) << 1) | (c >> 3)];
RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) +
(dir ? RAW(row+(~c | -2),col+c) : col ? RAW(row,col+(c | -2)) : 128);
if (c == 14) c = -1;
}
}
}
}
#define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
void CLASS smal_decode_segment (unsigned seg[2][2], int holes)
{
uchar hist[3][13] = {
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 3, 3, 0, 0, 63, 47, 31, 15, 0 } };
int low, high=0xff, carry=0, nbits=8;
int pix, s, count, bin, next, i, sym[3];
uchar diff, pred[]={0,0};
ushort data=0, range=0;
fseek (ifp, seg[0][1]+1, SEEK_SET);
getbits(-1);
for (pix=seg[0][0]; pix < seg[1][0]; pix++) {
for (s=0; s < 3; s++) {
data = data << nbits | getbits(nbits);
if (carry < 0)
carry = (nbits += carry+1) < 1 ? nbits-1 : 0;
while (--nbits >= 0)
if ((data >> nbits & 0xff) == 0xff) break;
if (nbits > 0)
data = ((data & ((1 << (nbits-1)) - 1)) << 1) |
((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits));
if (nbits >= 0) {
data += getbits(1);
carry = nbits - 8;
}
count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4);
for (bin=0; hist[s][bin+5] > count; bin++);
low = hist[s][bin+5] * (high >> 4) >> 2;
if (bin) high = hist[s][bin+4] * (high >> 4) >> 2;
high -= low;
for (nbits=0; high << nbits < 128; nbits++);
range = (range+low) << nbits;
high <<= nbits;
next = hist[s][1];
if (++hist[s][2] > hist[s][3]) {
next = (next+1) & hist[s][0];
hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2;
hist[s][2] = 1;
}
if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) {
if (bin < hist[s][1])
for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--;
else if (next <= bin)
for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++;
}
hist[s][1] = next;
sym[s] = bin;
}
diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
if (sym[0] & 4)
diff = diff ? -diff : 0x80;
if (ftell(ifp) + 12 >= seg[1][1])
diff = 0;
raw_image[pix] = pred[pix & 1] += diff;
if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
}
maximum = 0xff;
}
void CLASS smal_v6_load_raw()
{
unsigned seg[2][2];
fseek (ifp, 16, SEEK_SET);
seg[0][0] = 0;
seg[0][1] = get2();
seg[1][0] = raw_width * raw_height;
seg[1][1] = INT_MAX;
smal_decode_segment (seg, 0);
}
int CLASS median4 (int *p)
{
int min, max, sum, i;
min = max = sum = p[0];
for (i=1; i < 4; i++) {
sum += p[i];
if (min > p[i]) min = p[i];
if (max < p[i]) max = p[i];
}
return (sum - min - max) >> 1;
}
void CLASS fill_holes (int holes)
{
int row, col, val[4];
for (row=2; row < height-2; row++) {
if (!HOLE(row)) continue;
for (col=1; col < width-1; col+=4) {
val[0] = RAW(row-1,col-1);
val[1] = RAW(row-1,col+1);
val[2] = RAW(row+1,col-1);
val[3] = RAW(row+1,col+1);
RAW(row,col) = median4(val);
}
for (col=2; col < width-2; col+=4)
if (HOLE(row-2) || HOLE(row+2))
RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
else {
val[0] = RAW(row,col-2);
val[1] = RAW(row,col+2);
val[2] = RAW(row-2,col);
val[3] = RAW(row+2,col);
RAW(row,col) = median4(val);
}
}
}
void CLASS smal_v9_load_raw()
{
unsigned seg[256][2], offset, nseg, holes, i;
fseek (ifp, 67, SEEK_SET);
offset = get4();
nseg = fgetc(ifp);
fseek (ifp, offset, SEEK_SET);
for (i=0; i < nseg*2; i++)
seg[0][i] = get4() + data_offset*(i & 1);
fseek (ifp, 78, SEEK_SET);
holes = fgetc(ifp);
fseek (ifp, 88, SEEK_SET);
seg[nseg][0] = raw_height * raw_width;
seg[nseg][1] = get4() + data_offset;
for (i=0; i < nseg; i++)
smal_decode_segment (seg+i, holes);
if (holes) fill_holes (holes);
}
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
int c, row, col;
jas_stream_t *in;
jas_image_t *jimg;
jas_matrix_t *jmat;
jas_seqent_t *data;
ushort *img, *pix;
jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
in = jas_stream_fopen (ifname, "rb");
#else
in = (jas_stream_t*)ifp->make_jas_stream();
if(!in)
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
jas_stream_seek (in, data_offset+20, SEEK_SET);
jimg = jas_image_decode (in, -1, 0);
#ifndef LIBRAW_LIBRARY_BUILD
if (!jimg) longjmp (failure, 3);
#else
if(!jimg)
{
jas_stream_close (in);
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
}
#endif
jmat = jas_matrix_create (height/2, width/2);
merror (jmat, "redcine_load_raw()");
img = (ushort *) calloc ((height+2), (width+2)*2);
merror (img, "redcine_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
bool fastexitflag = false;
try {
#endif
FORC4 {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat);
data = jas_matrix_getref (jmat, 0, 0);
for (row = c >> 1; row < height; row+=2)
for (col = c & 1; col < width; col+=2)
img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2];
}
for (col=1; col <= width; col++) {
img[col] = img[2*(width+2)+col];
img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col];
}
for (row=0; row < height+2; row++) {
img[row*(width+2)] = img[row*(width+2)+2];
img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3];
}
for (row=1; row <= height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1));
for ( ; col <= width; col+=2, pix+=2) {
c = (((pix[0] - 0x800) << 3) +
pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2;
pix[0] = LIM(c,0,4095);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
fastexitflag=true;
}
#endif
free (img);
jas_matrix_destroy (jmat);
jas_image_destroy (jimg);
jas_stream_close (in);
#ifdef LIBRAW_LIBRARY_BUILD
if(fastexitflag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
#endif
}
void CLASS crop_masked_pixels()
{
int row, col;
unsigned
#ifndef LIBRAW_LIBRARY_BUILD
r, raw_pitch = raw_width*2,
c, m, mblack[8], zero, val;
#else
c, m, zero, val;
#define mblack imgdata.color.black_stat
#endif
#ifndef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS phase_one_load_raw ||
load_raw == &CLASS phase_one_load_raw_c)
phase_one_correct();
if (fuji_width) {
for (row=0; row < raw_height-top_margin*2; row++) {
for (col=0; col < fuji_width << !fuji_layout; col++) {
if (fuji_layout) {
r = fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
if (r < height && c < width)
BAYER(r,c) = RAW(row+top_margin,col+left_margin);
}
}
} else {
for (row=0; row < height; row++)
for (col=0; col < width; col++)
BAYER2(row,col) = RAW(row+top_margin,col+left_margin);
}
#endif
if (mask[0][3]) goto mask_set;
if (load_raw == &CLASS canon_load_raw ||
load_raw == &CLASS lossless_jpeg_load_raw) {
mask[0][1] = mask[1][1] = 2;
mask[0][3] = -2;
goto sides;
}
if (load_raw == &CLASS canon_600_load_raw ||
load_raw == &CLASS sony_load_raw ||
(load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) ||
load_raw == &CLASS kodak_262_load_raw ||
(load_raw == &CLASS packed_load_raw && (load_flags & 32))) {
sides:
mask[0][0] = mask[1][0] = top_margin;
mask[0][2] = mask[1][2] = top_margin+height;
mask[0][3] += left_margin;
mask[1][1] += left_margin+width;
mask[1][3] += raw_width;
}
if (load_raw == &CLASS nokia_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
mask_set:
memset (mblack, 0, sizeof mblack);
for (zero=m=0; m < 8; m++)
for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
c = FC(row-top_margin,col-left_margin);
mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)];
mblack[4+c]++;
zero += !val;
}
if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
(mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
canon_600_correct();
#endif
} else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7])
FORC4 cblack[c] = mblack[c] / mblack[4+c];
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif
void CLASS remove_zeroes()
{
unsigned row, col, tot, n, r, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
for (row=0; row < height; row++)
for (col=0; col < width; col++)
if (BAYER(row,col) == 0) {
tot = n = 0;
for (r = row-2; r <= row+2; r++)
for (c = col-2; c <= col+2; c++)
if (r < height && c < width &&
FC(r,c) == FC(row,col) && BAYER(r,c))
tot += (n++,BAYER(r,c));
if (n) BAYER(row,col) = tot/n;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
void CLASS gamma_curve (double pwr, double ts, int mode, int imax)
{
int i;
double g[6], bnd[2]={0,0}, r;
g[0] = pwr;
g[1] = ts;
g[2] = g[3] = g[4] = 0;
bnd[g[1] >= 1] = 1;
if (g[1] && (g[1]-1)*(g[0]-1) <= 0) {
for (i=0; i < 48; i++) {
g[2] = (bnd[0] + bnd[1])/2;
if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2];
else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2];
}
g[3] = g[2] / g[1];
if (g[0]) g[4] = g[2] * (1/g[0] - 1);
}
if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) +
(1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1;
else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1
- g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1;
if (!mode--) {
memcpy (gamm, g, sizeof gamm);
return;
}
for (i=0; i < 0x10000; i++) {
curve[i] = 0xffff;
if ((r = (double) i / imax) < 1)
curve[i] = 0x10000 * ( mode
? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1))
: (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
}
}
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
double work[3][6], num;
int i, j, k;
for (i=0; i < 3; i++) {
for (j=0; j < 6; j++)
work[i][j] = j == i+3;
for (j=0; j < 3; j++)
for (k=0; k < size; k++)
work[i][j] += in[k][i] * in[k][j];
}
for (i=0; i < 3; i++) {
num = work[i][i];
for (j=0; j < 6; j++)
work[i][j] /= num;
for (k=0; k < 3; k++) {
if (k==i) continue;
num = work[k][i];
for (j=0; j < 6; j++)
work[k][j] -= work[i][j] * num;
}
}
for (i=0; i < size; i++)
for (j=0; j < 3; j++)
for (out[i][j]=k=0; k < 3; k++)
out[i][j] += work[j][k+3] * in[i][k];
}
void CLASS cam_xyz_coeff (double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
if(num > 0.00001)
{
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
pre_mul[i] = 1 / num;
}
else
{
for (j=0; j < 3; j++)
cam_rgb[i][j] = 0.0;
pre_mul[i] = 1.0;
}
}
pseudoinverse (cam_rgb, inverse, colors);
for (raw_color = i=0; i < 3; i++)
for (j=0; j < colors; j++)
rgb_cam[i][j] = inverse[j][i];
}
#ifdef COLORCHECK
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
int cut[NSQ][4]; // you must set these
// ColorChecker Chart under 6500-kelvin illumination
static const double gmb_xyY[NSQ][3] = {
{ 0.400, 0.350, 10.1 }, // Dark Skin
{ 0.377, 0.345, 35.8 }, // Light Skin
{ 0.247, 0.251, 19.3 }, // Blue Sky
{ 0.337, 0.422, 13.3 }, // Foliage
{ 0.265, 0.240, 24.3 }, // Blue Flower
{ 0.261, 0.343, 43.1 }, // Bluish Green
{ 0.506, 0.407, 30.1 }, // Orange
{ 0.211, 0.175, 12.0 }, // Purplish Blue
{ 0.453, 0.306, 19.8 }, // Moderate Red
{ 0.285, 0.202, 6.6 }, // Purple
{ 0.380, 0.489, 44.3 }, // Yellow Green
{ 0.473, 0.438, 43.1 }, // Orange Yellow
{ 0.187, 0.129, 6.1 }, // Blue
{ 0.305, 0.478, 23.4 }, // Green
{ 0.539, 0.313, 12.0 }, // Red
{ 0.448, 0.470, 59.1 }, // Yellow
{ 0.364, 0.233, 19.8 }, // Magenta
{ 0.196, 0.252, 19.8 }, // Cyan
{ 0.310, 0.316, 90.0 }, // White
{ 0.310, 0.316, 59.1 }, // Neutral 8
{ 0.310, 0.316, 36.2 }, // Neutral 6.5
{ 0.310, 0.316, 19.8 }, // Neutral 5
{ 0.310, 0.316, 9.0 }, // Neutral 3.5
{ 0.310, 0.316, 3.1 } }; // Black
double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
double inverse[NSQ][3], cam_xyz[4][3], num;
int c, i, j, k, sq, row, col, count[4];
memset (gmb_cam, 0, sizeof gmb_cam);
for (sq=0; sq < NSQ; sq++) {
FORCC count[c] = 0;
for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
c = FC(row,col);
if (c >= colors) c -= 2;
gmb_cam[sq][c] += BAYER(row,col);
count[c]++;
}
FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
gmb_xyz[sq][1] = gmb_xyY[sq][2];
gmb_xyz[sq][2] = gmb_xyY[sq][2] *
(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
}
pseudoinverse (gmb_xyz, inverse, NSQ);
for (i=0; i < colors; i++)
for (j=0; j < 3; j++)
for (cam_xyz[i][j] = k=0; k < NSQ; k++)
cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
cam_xyz_coeff (cam_xyz);
if (verbose) {
printf (" { \"%s %s\", %d,\n\t{", make, model, black);
num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
FORCC for (j=0; j < 3; j++)
printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
puts (" } },");
}
#undef NSQ
}
#endif
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
int i;
for (i=0; i < sc; i++)
temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)];
for (; i+sc < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)];
for (; i < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))];
}
#if !defined(LIBRAW_USE_OPENMP)
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
FORC(nc) { /* denoise R,G1,B,G3 individually */
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++) {
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
{
temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
FORC(nc) { /* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
free(temp);
} /* end omp parallel */
/* the following loops are hard to parallize, no idea yes,
* problem is wlast which is carrying dependency
* second part should be easyer, but did not yet get it right.
*/
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++){
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#endif
// green equilibration
void CLASS green_matching()
{
int i,j;
double m1,m2,c1,c2;
int o1_1,o1_2,o1_3,o1_4;
int o2_1,o2_2,o2_3,o2_4;
ushort (*img)[4];
const int margin = 3;
int oj = 2, oi = 2;
float f;
const float thr = 0.01f;
if(half_size || shrink) return;
if(FC(oj, oi) != 3) oj++;
if(FC(oj, oi) != 3) oi++;
if(FC(oj, oi) != 3) oj--;
img = (ushort (*)[4]) calloc (height*width, sizeof *image);
merror (img, "green_matching()");
memcpy(img,image,height*width*sizeof *image);
for(j=oj;j<height-margin;j+=2)
for(i=oi;i<width-margin;i+=2){
o1_1=img[(j-1)*width+i-1][1];
o1_2=img[(j-1)*width+i+1][1];
o1_3=img[(j+1)*width+i-1][1];
o1_4=img[(j+1)*width+i+1][1];
o2_1=img[(j-2)*width+i][3];
o2_2=img[(j+2)*width+i][3];
o2_3=img[j*width+i-2][3];
o2_4=img[j*width+i+2][3];
m1=(o1_1+o1_2+o1_3+o1_4)/4.0;
m2=(o2_1+o2_2+o2_3+o2_4)/4.0;
c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0;
c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0;
if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr))
{
f = image[j*width+i][3]*m1/m2;
image[j*width+i][3]=f>0xffff?0xffff:f;
}
}
free(img);
}
void CLASS scale_colors()
{
unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
int val, dark, sat;
double dsum[8], dmin, dmax;
float scale_mul[4], fr, fc;
ushort *img=0, *pix;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2);
#endif
if (user_mul[0])
memcpy (pre_mul, user_mul, sizeof pre_mul);
if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
memset (dsum, 0, sizeof dsum);
bottom = MIN (greybox[1]+greybox[3], height);
right = MIN (greybox[0]+greybox[2], width);
for (row=greybox[1]; row < bottom; row += 8)
for (col=greybox[0]; col < right; col += 8) {
memset (sum, 0, sizeof sum);
for (y=row; y < row+8 && y < bottom; y++)
for (x=col; x < col+8 && x < right; x++)
FORC4 {
if (filters) {
c = fcol(y,x);
val = BAYER2(y,x);
} else
val = image[y*width+x][c];
if (val > maximum-25) goto skip_block;
if ((val -= cblack[c]) < 0) val = 0;
sum[c] += val;
sum[c+4]++;
if (filters) break;
}
FORC(8) dsum[c] += sum[c];
skip_block: ;
}
FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
}
if (use_camera_wb && cam_mul[0] != -1) {
memset (sum, 0, sizeof sum);
for (row=0; row < 8; row++)
for (col=0; col < 8; col++) {
c = FC(row,col);
if ((val = white[row][col] - cblack[c]) > 0)
sum[c] += val;
sum[c+4]++;
}
if (sum[0] && sum[1] && sum[2] && sum[3])
FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
else if (cam_mul[0] && cam_mul[2])
memcpy (pre_mul, cam_mul, sizeof pre_mul);
else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
#endif
}
}
if (pre_mul[1] == 0) pre_mul[1] = 1;
if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
dark = black;
sat = maximum;
if (threshold) wavelet_denoise();
maximum -= black;
for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
if (dmin > pre_mul[c])
dmin = pre_mul[c];
if (dmax < pre_mul[c])
dmax = pre_mul[c];
}
if (!highlight) dmax = dmin;
FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
if (verbose) {
fprintf (stderr,
_("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
FORC4 fprintf (stderr, " %f", pre_mul[c]);
fputc ('\n', stderr);
}
#endif
size = iheight*iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
scale_colors_loop(scale_mul);
#else
for (i=0; i < size*4; i++) {
val = image[0][i];
if (!val) continue;
val -= cblack[i & 3];
val *= scale_mul[i & 3];
image[0][i] = CLIP(val);
}
#endif
if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Correcting chromatic aberration...\n"));
#endif
for (c=0; c < 4; c+=2) {
if (aber[c] == 1) continue;
img = (ushort *) malloc (size * sizeof *img);
merror (img, "scale_colors()");
for (i=0; i < size; i++)
img[i] = image[i][c];
for (row=0; row < iheight; row++) {
ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
if (ur > iheight-2) continue;
fr -= ur;
for (col=0; col < iwidth; col++) {
uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
if (uc > iwidth-2) continue;
fc -= uc;
pix = img + ur*iwidth + uc;
image[row*iwidth+col][c] =
(pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) +
(pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
}
}
free(img);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2);
#endif
}
void CLASS pre_interpolate()
{
ushort (*img)[4];
int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2);
#endif
if (shrink) {
if (half_size) {
height = iheight;
width = iwidth;
if (filters == 9) {
for (row=0; row < 3; row++)
for (col=1; col < 4; col++)
if (!(image[row*width+col][0] | image[row*width+col][2]))
goto break2; break2:
for ( ; row < height; row+=3)
for (col=(col-1)%3+1; col < width-1; col+=3) {
img = image + row*width+col;
for (c=0; c < 3; c+=2)
img[0][c] = (img[-1][c] + img[1][c]) >> 1;
}
}
} else {
img = (ushort (*)[4]) calloc (height, width*sizeof *img);
merror (img, "pre_interpolate()");
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
c = fcol(row,col);
img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
}
free (image);
image = img;
shrink = 0;
}
}
if (filters > 1000 && colors == 3) {
mix_green = four_color_rgb ^ half_size;
if (four_color_rgb | half_size) colors++;
else {
for (row = FC(1,0) >> 1; row < height; row+=2)
for (col = FC(row,1) & 1; col < width; col+=2)
image[row*width+col][1] = image[row*width+col][3];
filters &= ~((filters & 0x55555555) << 1);
}
}
if (half_size) filters = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2);
#endif
}
void CLASS border_interpolate (int border)
{
unsigned row, col, y, x, f, c, sum[8];
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
if (col==border && row >= border && row < height-border)
col = width-border;
memset (sum, 0, sizeof sum);
for (y=row-1; y != row+2; y++)
for (x=col-1; x != col+2; x++)
if (y < height && x < width) {
f = fcol(y,x);
sum[f] += image[y*width+x][f];
sum[f+4]++;
}
f = fcol(row,col);
FORCC if (c != f && sum[c+4])
image[row*width+col][c] = sum[c] / sum[c+4];
}
}
void CLASS lin_interpolate_loop(int code[16][16][32],int size)
{
int row;
for (row=1; row < height-1; row++)
{
int col,*ip;
ushort *pix;
for (col=1; col < width-1; col++) {
int i;
int sum[4];
pix = image[row*width+col];
ip = code[row % size][col % size];
memset (sum, 0, sizeof sum);
for (i=*ip++; i--; ip+=3)
sum[ip[2]] += pix[ip[0]] << ip[1];
for (i=colors; --i; ip+=2)
pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
}
}
}
void CLASS lin_interpolate()
{
int code[16][16][32], size=16, *ip, sum[4];
int f, c, x, y, row, col, shift, color;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#endif
if (filters == 9) size = 6;
border_interpolate(1);
for (row=0; row < size; row++)
for (col=0; col < size; col++) {
ip = code[row][col]+1;
f = fcol(row,col);
memset (sum, 0, sizeof sum);
for (y=-1; y <= 1; y++)
for (x=-1; x <= 1; x++) {
shift = (y==0) + (x==0);
color = fcol(row+y,col+x);
if (color == f) continue;
*ip++ = (width*y + x)*4 + color;
*ip++ = shift;
*ip++ = color;
sum[color] += 1 << shift;
}
code[row][col][0] = (ip - code[row][col]) / 3;
FORCC
if (c != f) {
*ip++ = c;
*ip++ = sum[c]>0?256 / sum[c]:0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#endif
lin_interpolate_loop(code,size);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#endif
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
void CLASS vng_interpolate()
{
static const signed char *cp, terms[] = {
-2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
-2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
-2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
-2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
-2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
-1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
-1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40,
-1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
-1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
-1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
-1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
-1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
-1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
+0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40,
+0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
+0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
+0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
+0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
+0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
+0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80,
+1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
+1,+0,+2,+1,0,0x10
}, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
ushort (*brow[5])[4], *pix;
int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
int g, diff, thold, num, c;
lin_interpolate();
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
#endif
if (filters == 1) prow = pcol = 16;
if (filters == 9) prow = pcol = 6;
ip = (int *) calloc (prow*pcol, 1280);
merror (ip, "vng_interpolate()");
for (row=0; row < prow; row++) /* Precalculate for VNG */
for (col=0; col < pcol; col++) {
code[row][col] = ip;
for (cp=terms, t=0; t < 64; t++) {
y1 = *cp++; x1 = *cp++;
y2 = *cp++; x2 = *cp++;
weight = *cp++;
grads = *cp++;
color = fcol(row+y1,col+x1);
if (fcol(row+y2,col+x2) != color) continue;
diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
*ip++ = (y1*width + x1)*4 + color;
*ip++ = (y2*width + x2)*4 + color;
*ip++ = weight;
for (g=0; g < 8; g++)
if (grads & 1<<g) *ip++ = g;
*ip++ = -1;
}
*ip++ = INT_MAX;
for (cp=chood, g=0; g < 8; g++) {
y = *cp++; x = *cp++;
*ip++ = (y*width + x) * 4;
color = fcol(row,col);
if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
*ip++ = (y*width + x) * 8 + color;
else
*ip++ = 0;
}
}
brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
merror (brow[4], "vng_interpolate()");
for (row=0; row < 3; row++)
brow[row] = brow[4] + row*width;
for (row=2; row < height-2; row++) { /* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1);
#endif
for (col=2; col < width-2; col++) {
pix = image[row*width+col];
ip = code[row % prow][col % pcol];
memset (gval, 0, sizeof gval);
while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */
diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
gval[ip[3]] += diff;
ip += 5;
if ((g = ip[-1]) == -1) continue;
gval[g] += diff;
while ((g = *ip++) != -1)
gval[g] += diff;
}
ip++;
gmin = gmax = gval[0]; /* Choose a threshold */
for (g=1; g < 8; g++) {
if (gmin > gval[g]) gmin = gval[g];
if (gmax < gval[g]) gmax = gval[g];
}
if (gmax == 0) {
memcpy (brow[2][col], pix, sizeof *image);
continue;
}
thold = gmin + (gmax >> 1);
memset (sum, 0, sizeof sum);
color = fcol(row,col);
for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */
if (gval[g] <= thold) {
FORCC
if (c == color && ip[1])
sum[c] += (pix[c] + pix[ip[1]]) >> 1;
else
sum[c] += pix[ip[0] + c];
num++;
}
}
FORCC { /* Save to buffer */
t = pix[color];
if (c != color)
t += (sum[c] - sum[color]) / num;
brow[2][col][c] = CLIP(t);
}
}
if (row > 3) /* Write buffer to image */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
for (g=0; g < 4; g++)
brow[(g-1) & 3] = brow[g];
}
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
free (brow[4]);
free (code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
void CLASS ppg_interpolate()
{
int dir[5] = { 1, width, -1, -width, 1 };
int row, col, diff[2], guess[2], c, d, i;
ushort (*pix)[4];
border_interpolate(3);
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#endif
/* Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=3; row < height-3; row++)
for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; i++) {
guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
- pix[-2*d][c] - pix[2*d][c];
diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
ABS(pix[ 2*d][c] - pix[ 0][c]) +
ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
( ABS(pix[ 3*d][1] - pix[ d][1]) +
ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
}
d = dir[i = diff[0] > diff[1]];
pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
}
/* Calculate red and blue for each green pixel: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; c=2-c, i++)
pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1]) >> 1);
}
/* Calculate blue for red pixels and vice versa: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
diff[i] = ABS(pix[-d][c] - pix[d][c]) +
ABS(pix[-d][1] - pix[0][1]) +
ABS(pix[ d][1] - pix[0][1]);
guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1];
}
if (diff[0] != diff[1])
pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
else
pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
}
}
void CLASS cielab (ushort rgb[3], short lab[3])
{
int c, i, j, k;
float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
if (!rgb) {
#ifndef LIBRAW_NOTHREADS
if(cbrt[0] < -1.0f)
#endif
for (i=0; i < 0x10000; i++) {
r = i / 65535.0;
cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f;
}
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (xyz_cam[i][j] = k=0; k < 3; k++)
xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
return;
}
xyz[0] = xyz[1] = xyz[2] = 0.5;
FORCC {
xyz[0] += xyz_cam[0][c] * rgb[c];
xyz[1] += xyz_cam[1][c] * rgb[c];
xyz[2] += xyz_cam[2][c] * rgb[c];
}
xyz[0] = cbrt[CLIP((int) xyz[0])];
xyz[1] = cbrt[CLIP((int) xyz[1])];
xyz[2] = cbrt[CLIP((int) xyz[2])];
lab[0] = 64 * (116 * xyz[1] - 16);
lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}
#define TS 512 /* Tile Size */
#define fcol(row,col) xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6]
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
void CLASS xtrans_interpolate (int passes)
{
int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
int val, ndir, pass, hm[8], avg[4], color[3][8];
static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
dir[4] = { 1,TS,TS+1,TS-1 };
short allhex[3][3][2][8], *hex;
ushort min, max, sgrow, sgcol;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab) [TS][3], (*lix)[3];
float (*drv)[TS][TS], diff[6], tr;
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif
cielab (0,0);
border_interpolate(6);
ndir = 4 << (passes > 1);
buffer = (char *) malloc (TS*TS*(ndir*11+6));
merror (buffer, "xtrans_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6));
drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6));
homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6));
/* Map a green hexagon around each non-green pixel and vice versa: */
for (row=0; row < 3; row++)
for (col=0; col < 3; col++)
for (ng=d=0; d < 10; d+=2) {
g = fcol(row,col) == 1;
if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
if (ng == 4) { sgrow = row; sgcol = col; }
if (ng == g+1) FORC(8) {
v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
allhex[row][col][0][c^(g*2 & d)] = h + v*width;
allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
}
}
/* Set green1 and green3 to the minimum and maximum allowed values: */
for (row=2; row < height-2; row++)
for (min=~(max=0), col=2; col < width-2; col++) {
if (fcol(row,col) == 1 && (min=~(max=0))) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
if (!max) FORC(6) {
val = pix[hex[c]][1];
if (min > val) min = val;
if (max < val) max = val;
}
pix[0][1] = min;
pix[0][3] = max;
switch ((row-sgrow) % 3) {
case 1: if (row < height-3) { row++; col--; } break;
case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--;
}
}
for (top=3; top < height-19; top += TS-16)
for (left=3; left < width-19; left += TS-16) {
mrow = MIN (top+TS, height-3);
mcol = MIN (left+TS, width-3);
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++)
memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);
/* Interpolate green horizontally, vertically, and along both diagonals: */
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) -
46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 +
92 * (pix[ 0 ][f] - pix[ -hex[2]][f]);
FORC(2) color[1][2+c] =
164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
}
for (pass=0; pass < passes; pass++) {
if (pass == 1)
memcpy (rgb+=4, buffer, 4*sizeof *rgb);
/* Recalculate green from interpolated values of closer pixels: */
if (pass) {
for (row=top+2; row < mrow-2; row++)
for (col=left+2; col < mcol-2; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][1];
for (d=3; d < 6; d++) {
rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
- rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
}
}
}
/* Interpolate red and blue values for solitary green pixels: */
for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
rix = &rgb[0][row-top][col-left];
h = fcol(row,col+1);
memset (diff, 0, sizeof diff);
for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
for (c=0; c < 2; c++, h^=2) {
g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
if (d > 1)
diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
}
if (d > 1 && (d & 1))
if (diff[d-1] < diff[d])
FORC(2) color[c*2][d] = color[c*2][d-1];
if (d < 2 || (d & 1)) {
FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
rix += TS*TS;
}
}
}
/* Interpolate red for blue pixels and vice versa: */
for (row=top+1; row < mrow-1; row++)
for (col=left+1; col < mcol-1; col++) {
if ((f = 2-fcol(row,col)) == 1) continue;
rix = &rgb[0][row-top][col-left];
i = (row-sgrow) % 3 ? TS:1;
for (d=0; d < 4; d++, rix += TS*TS)
rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
}
/* Fill in red and blue for 2x2 blocks of green: */
for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
rix = &rgb[0][row-top][col-left];
hex = allhex[row % 3][col % 3][1];
for (d=0; d < ndir; d+=2, rix += TS*TS)
if (hex[d] + hex[d+1]) {
g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
} else {
g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
}
}
}
rgb = (ushort(*)[TS][TS][3]) buffer;
mrow -= top;
mcol -= left;
/* Convert to CIELab and differentiate in all directions: */
for (d=0; d < ndir; d++) {
for (row=2; row < mrow-2; row++)
for (col=2; col < mcol-2; col++)
cielab (rgb[d][row][col], lab[row][col]);
for (f=dir[d & 3],row=3; row < mrow-3; row++)
for (col=3; col < mcol-3; col++) {
lix = &lab[row][col];
g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
drv[d][row][col] = SQR(g)
+ SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
+ SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
}
}
/* Build homogeneity maps from the derivatives: */
memset(homo, 0, ndir*TS*TS);
for (row=4; row < mrow-4; row++)
for (col=4; col < mcol-4; col++) {
for (tr=FLT_MAX, d=0; d < ndir; d++)
if (tr > drv[d][row][col])
tr = drv[d][row][col];
tr *= 8;
for (d=0; d < ndir; d++)
for (v=-1; v <= 1; v++)
for (h=-1; h <= 1; h++)
if (drv[d][row+v][col+h] <= tr)
homo[d][row][col]++;
}
/* Average the most homogenous pixels for the final result: */
if (height-top < TS+4) mrow = height-top+2;
if (width-left < TS+4) mcol = width-left+2;
for (row = MIN(top,8); row < mrow-8; row++)
for (col = MIN(left,8); col < mcol-8; col++) {
for (d=0; d < ndir; d++)
for (hm[d]=0, v=-2; v <= 2; v++)
for (h=-2; h <= 2; h++)
hm[d] += homo[d][row+v][col+h];
for (d=0; d < ndir-4; d++)
if (hm[d] < hm[d+4]) hm[d ] = 0; else
if (hm[d] > hm[d+4]) hm[d+4] = 0;
for (max=hm[0],d=1; d < ndir; d++)
if (max < hm[d]) max = hm[d];
max -= max >> 3;
memset (avg, 0, sizeof avg);
for (d=0; d < ndir; d++)
if (hm[d] >= max) {
FORC3 avg[c] += rgb[d][row][col][c];
avg[3]++;
}
FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
}
}
free(buffer);
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
int row, col;
int c, val;
ushort (*pix)[4];
const int rowlimit = MIN(top+TS, height-2);
const int collimit = MIN(left+TS, width-2);
for (row = top; row < rowlimit; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < collimit; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
}
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3])
{
unsigned row, col;
int c, val;
ushort (*pix)[4];
ushort (*rix)[3];
short (*lix)[3];
float xyz[3];
const unsigned num_pix_per_row = 4*width;
const unsigned rowlimit = MIN(top+TS-1, height-3);
const unsigned collimit = MIN(left+TS-1, width-3);
ushort *pix_above;
ushort *pix_below;
int t1, t2;
for (row = top+1; row < rowlimit; row++) {
pix = image + row*width + left;
rix = &inout_rgb[row-top][0];
lix = &out_lab[row-top][0];
for (col = left+1; col < collimit; col++) {
pix++;
pix_above = &pix[0][0] - num_pix_per_row;
pix_below = &pix[0][0] + num_pix_per_row;
rix++;
lix++;
c = 2 - FC(row, col);
if (c == 1) {
c = FC(row+1,col);
t1 = 2-c;
val = pix[0][1] + (( pix[-1][t1] + pix[1][t1]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][t1] = CLIP(val);
val = pix[0][1] + (( pix_above[c] + pix_below[c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else {
t1 = -4+c; /* -4+c: pixel of color c to the left */
t2 = 4+c; /* 4+c: pixel of color c to the right */
val = rix[0][1] + (( pix_above[t1] + pix_above[t2]
+ pix_below[t1] + pix_below[t2]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
}
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab(rix[0],lix[0]);
}
}
}
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3])
{
int direction;
for (direction = 0; direction < 2; direction++) {
ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]);
}
}
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int direction;
int i;
short (*lix)[3];
short (*lixs[2])[3];
short *adjacent_lix;
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
static const int dir[4] = { -1, 1, -TS, TS };
const int rowlimit = MIN(top+TS-2, height-4);
const int collimit = MIN(left+TS-2, width-4);
int homogeneity;
char (*homogeneity_map_p)[2];
memset (out_homogeneity_map, 0, 2*TS*TS);
for (row=top+2; row < rowlimit; row++) {
tr = row-top;
homogeneity_map_p = &out_homogeneity_map[tr][1];
for (direction=0; direction < 2; direction++) {
lixs[direction] = &lab[direction][tr][1];
}
for (col=left+2; col < collimit; col++) {
tc = col-left;
homogeneity_map_p++;
for (direction=0; direction < 2; direction++) {
lix = ++lixs[direction];
for (i=0; i < 4; i++) {
adjacent_lix = lix[dir[i]];
ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]);
abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1])
+ SQR(lix[0][2]-adjacent_lix[2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (direction=0; direction < 2; direction++) {
homogeneity = 0;
for (i=0; i < 4; i++) {
if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) {
homogeneity++;
}
}
homogeneity_map_p[0][direction] = homogeneity;
}
}
}
}
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int i, j;
int direction;
int hm[2];
int c;
const int rowlimit = MIN(top+TS-3, height-5);
const int collimit = MIN(left+TS-3, width-5);
ushort (*pix)[4];
ushort (*rix[2])[3];
for (row=top+3; row < rowlimit; row++) {
tr = row-top;
pix = &image[row*width+left+2];
for (direction = 0; direction < 2; direction++) {
rix[direction] = &rgb[direction][tr][2];
}
for (col=left+3; col < collimit; col++) {
tc = col-left;
pix++;
for (direction = 0; direction < 2; direction++) {
rix[direction]++;
}
for (direction=0; direction < 2; direction++) {
hm[direction] = 0;
for (i=tr-1; i <= tr+1; i++) {
for (j=tc-1; j <= tc+1; j++) {
hm[direction] += homogeneity_map[i][j][direction];
}
}
}
if (hm[0] != hm[1]) {
memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
} else {
FORC3 {
pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1;
}
}
}
}
}
void CLASS ahd_interpolate()
{
int i, j, k, top, left;
float xyz_cam[3][4],r;
char *buffer;
ushort (*rgb)[TS][TS][3];
short (*lab)[TS][TS][3];
char (*homo)[TS][2];
int terminate_flag = 0;
cielab(0,0);
border_interpolate(5);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag)
#endif
#endif
{
buffer = (char *) malloc (26*TS*TS); /* 1664 kB */
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][2]) (buffer + 24*TS*TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
for (top=2; top < height-5; top += TS-6){
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
if(0== omp_get_thread_num())
#endif
if(callbacks.progress_cb) {
int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7);
if(rr)
terminate_flag = 1;
}
#endif
for (left=2; !terminate_flag && (left < width-5); left += TS-6) {
ahd_interpolate_green_h_and_v(top, left, rgb);
ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
}
}
free (buffer);
}
#ifdef LIBRAW_LIBRARY_BUILD
if(terminate_flag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
void CLASS ahd_interpolate()
{
int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
static const int dir[4] = { -1, 1, -TS, TS };
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab)[TS][TS][3], (*lix)[3];
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#endif
cielab (0,0);
border_interpolate(5);
buffer = (char *) malloc (26*TS*TS);
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][TS]) (buffer + 24*TS*TS);
for (top=2; top < height-5; top += TS-6)
for (left=2; left < width-5; left += TS-6) {
/* Interpolate green horizontally and vertically: */
for (row=top; row < top+TS && row < height-2; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
/* Interpolate red and blue, and convert to CIELab: */
for (d=0; d < 2; d++)
for (row=top+1; row < top+TS-1 && row < height-3; row++)
for (col=left+1; col < left+TS-1 && col < width-3; col++) {
pix = image + row*width+col;
rix = &rgb[d][row-top][col-left];
lix = &lab[d][row-top][col-left];
if ((c = 2 - FC(row,col)) == 1) {
c = FC(row+1,col);
val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][2-c] = CLIP(val);
val = pix[0][1] + (( pix[-width][c] + pix[width][c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else
val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c]
+ pix[+width-1][c] + pix[+width+1][c]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab (rix[0],lix[0]);
}
/* Build homogeneity maps from the CIELab images: */
memset (homo, 0, 2*TS*TS);
for (row=top+2; row < top+TS-2 && row < height-4; row++) {
tr = row-top;
for (col=left+2; col < left+TS-2 && col < width-4; col++) {
tc = col-left;
for (d=0; d < 2; d++) {
lix = &lab[d][tr][tc];
for (i=0; i < 4; i++) {
ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]);
abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1])
+ SQR(lix[0][2]-lix[dir[i]][2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (d=0; d < 2; d++)
for (i=0; i < 4; i++)
if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
homo[d][tr][tc]++;
}
}
/* Combine the most homogenous pixels for the final result: */
for (row=top+3; row < top+TS-3 && row < height-5; row++) {
tr = row-top;
for (col=left+3; col < left+TS-3 && col < width-5; col++) {
tc = col-left;
for (d=0; d < 2; d++)
for (hm[d]=0, i=tr-1; i <= tr+1; i++)
for (j=tc-1; j <= tc+1; j++)
hm[d] += homo[d][i][j];
if (hm[0] != hm[1])
FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
else
FORC3 image[row*width+col][c] =
(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
}
}
}
free (buffer);
}
#endif
#undef TS
void CLASS median_filter()
{
ushort (*pix)[4];
int pass, c, i, j, k, med[9];
static const uchar opt[] = /* Optimal 9-element median search */
{ 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8,
0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 };
for (pass=1; pass <= med_passes; pass++) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes);
#endif
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Median filter pass %d...\n"), pass);
#endif
for (c=0; c < 3; c+=2) {
for (pix = image; pix < image+width*height; pix++)
pix[0][3] = pix[0][c];
for (pix = image+width; pix < image+width*(height-1); pix++) {
if ((pix-image+1) % width < 2) continue;
for (k=0, i = -width; i <= width; i += width)
for (j = i-1; j <= i+1; j++)
med[k++] = pix[j][3] - pix[j][1];
for (i=0; i < sizeof opt; i+=2)
if (med[opt[i]] > med[opt[i+1]])
SWAP (med[opt[i]] , med[opt[i+1]]);
pix[0][c] = CLIP(med[4] + pix[0][1]);
}
}
}
}
void CLASS blend_highlights()
{
int clip=INT_MAX, row, col, c, i, j;
static const float trans[2][4][4] =
{ { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
static const float itrans[2][4][4] =
{ { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
float cam[2][4], lab[2][4], sum[2], chratio;
if ((unsigned) (colors-3) > 1) return;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2);
#endif
FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
FORCC if (image[row*width+col][c] > clip) break;
if (c == colors) continue;
FORCC {
cam[0][c] = image[row*width+col][c];
cam[1][c] = MIN(cam[0][c],clip);
}
for (i=0; i < 2; i++) {
FORCC for (lab[i][c]=j=0; j < colors; j++)
lab[i][c] += trans[colors-3][c][j] * cam[i][j];
for (sum[i]=0,c=1; c < colors; c++)
sum[i] += SQR(lab[i][c]);
}
chratio = sqrt(sum[1]/sum[0]);
for (c=1; c < colors; c++)
lab[0][c] *= chratio;
FORCC for (cam[0][c]=j=0; j < colors; j++)
cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
FORCC image[row*width+col][c] = cam[0][c] / colors;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2);
#endif
}
#define SCALE (4 >> shrink)
void CLASS recover_highlights()
{
float *map, sum, wgt, grow;
int hsat[4], count, spread, change, val, i;
unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
ushort *pixel;
static const signed char dir[8][2] =
{ {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Rebuilding highlights...\n"));
#endif
grow = pow (2.0, 4-highlight);
FORCC hsat[c] = 32000 * pre_mul[c];
for (kc=0, c=1; c < colors; c++)
if (pre_mul[kc] < pre_mul[c]) kc = c;
high = height / SCALE;
wide = width / SCALE;
map = (float *) calloc (high, wide*sizeof *map);
merror (map, "recover_highlights()");
FORCC if (c != kc) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1);
#endif
memset (map, 0, high*wide*sizeof *map);
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
sum = wgt = count = 0;
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) {
sum += pixel[c];
wgt += pixel[kc];
count++;
}
}
if (count == SCALE*SCALE)
map[mrow*wide+mcol] = sum / wgt;
}
for (spread = 32/grow; spread--; ) {
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
if (map[mrow*wide+mcol]) continue;
sum = count = 0;
for (d=0; d < 8; d++) {
y = mrow + dir[d][0];
x = mcol + dir[d][1];
if (y < high && x < wide && map[y*wide+x] > 0) {
sum += (1 + (d & 1)) * map[y*wide+x];
count += 1 + (d & 1);
}
}
if (count > 3)
map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
}
for (change=i=0; i < high*wide; i++)
if (map[i] < 0) {
map[i] = -map[i];
change = 1;
}
if (!change) break;
}
for (i=0; i < high*wide; i++)
if (map[i] == 0) map[i] = 1;
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] > 1) {
val = pixel[kc] * map[mrow*wide+mcol];
if (pixel[c] < val) pixel[c] = CLIP(val);
}
}
}
}
free (map);
}
#undef SCALE
void CLASS tiff_get (unsigned base,
unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
*tag = get2();
*type = get2();
*len = get4();
*save = ftell(ifp) + 4;
if (*len * ("11124811248488"[*type < 14 ? *type:0]-'0') > 4)
fseek (ifp, get4()+base, SEEK_SET);
}
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
unsigned entries, tag, type, len, save;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == toff) thumb_offset = get4()+base;
if (tag == tlen) thumb_length = get4();
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_makernote (int base, int uptag)
{
static const uchar xlat[2][256] = {
{ 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d,
0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d,
0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f,
0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f,
0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1,
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17,
0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89,
0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f,
0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b,
0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb,
0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3,
0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f,
0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35,
0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43,
0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5,
0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 },
{ 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c,
0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34,
0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad,
0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05,
0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee,
0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d,
0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b,
0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b,
0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc,
0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33,
0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8,
0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6,
0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c,
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49,
0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb,
0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } };
unsigned offset=0, entries, tag, type, len, save, c;
unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0};
uchar buf97[324], ci, cj, ck;
short morder, sorder=order;
char buf[10];
unsigned SamsungKey[11];
static const double rgb_adobe[3][3] = // inv(sRGB2XYZ_D65) * AdobeRGB2XYZ_D65
{{ 1.398283396477404, -0.398283116703571, 4.427165001263944E-08},
{-1.233904514232401E-07, 0.999999995196570, 3.126724276714121e-08},
{ 4.561487232726535E-08, -0.042938290466635, 1.042938250416105 }};
float adobe_cam [3][3];
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strcmp(make,"Nokia")) return;
fread (buf, 1, 10, ifp);
if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */
!strncmp (buf,"VER" ,3) ||
!strncmp (buf,"IIII",4) ||
!strncmp (buf,"MMMM",4)) return;
if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */
!strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i=ftell(ifp)) < data_offset && i < 16384) {
wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 &&
wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp (buf,"Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek (ifp, offset-8, SEEK_CUR);
} else if (!strcmp (buf,"OLYMPUS")) {
base = ftell(ifp)-10;
fseek (ifp, -2, SEEK_CUR);
order = get2(); get2();
} else if (!strncmp (buf,"SONY",4) ||
!strcmp (buf,"Panasonic")) {
goto nf;
} else if (!strncmp (buf,"FUJIFILM",8)) {
base = ftell(ifp)-10;
nf: order = 0x4949;
fseek (ifp, 2, SEEK_CUR);
} else if (!strcmp (buf,"OLYMP") ||
!strcmp (buf,"LEICA") ||
!strcmp (buf,"Ricoh") ||
!strcmp (buf,"EPSON"))
fseek (ifp, -2, SEEK_CUR);
else if (!strcmp (buf,"AOC") ||
!strcmp (buf,"QVC"))
fseek (ifp, -4, SEEK_CUR);
else {
fseek (ifp, -10, SEEK_CUR);
if (!strncmp(make,"SAMSUNG",7))
base = ftell(ifp);
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get (base, &tag, &type, &len, &save);
tag |= uptag << 16;
if (tag == 2 && strstr(make,"NIKON") && !iso_speed)
iso_speed = (get2(),get2());
if (tag == 37 && strstr(make,"NIKON") && !iso_speed)
{
unsigned char cc;
fread(&cc,1,1,ifp);
iso_speed = int(100.0 * pow(2.0,double(cc)/12.0-5.0));
}
if (tag == 4 && len > 26 && len < 35) {
if ((i=(get4(),get2())) != 0x7fff && !iso_speed)
iso_speed = 50 * pow (2.0, i/32.0 - 4);
if ((i=(get2(),get2())) != 0x7fff && !aperture)
aperture = pow (2.0, i/64.0);
if ((i=get2()) != 0xffff && !shutter)
shutter = pow (2.0, (short) i/-32.0);
wbi = (get2(),get2());
shot_order = (get2(),get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) {
fseek (ifp, tag == 4 ? 140:160, SEEK_CUR);
switch (get2()) {
case 72: flip = 0; break;
case 76: flip = 6; break;
case 82: flip = 5; break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets (model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strcmp(make,"Canon"))
fread (artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa) {
for (c=i=2; (ushort) c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i+=4) < len-5)
if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3)
flip = "065"[c]-'0';
}
if (tag == 0x10 && type == 4)
unique_id = get4();
if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) {
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
}
if (tag == 0x14 && type == 7) {
if (len == 2560) {
fseek (ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread (buf, 1, 10, ifp);
if (!strncmp(buf,"NRW ",4)) {
fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread (model, 64, 1, ifp);
if (strstr(make,"PENTAX")) {
if (tag == 0x1b) tag = 0x1018;
if (tag == 0x1c) tag = 0x1017;
}
if (tag == 0x1d)
while ((c = fgetc(ifp)) && c != EOF)
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
if (tag == 0x81 && type == 4) {
data_offset = get4();
fseek (ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if (tag == 0x29 && type == 1) {
c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0;
fseek (ifp, 8 + c*32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
if ((tag == 0x81 && type == 7) ||
(tag == 0x100 && type == 7) ||
(tag == 0x280 && type == 1)) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97) {
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
switch (ver97) {
case 100:
fseek (ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek (ifp, 6, SEEK_CUR);
goto get2_rggb;
case 103:
fseek (ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200) {
if (ver97 != 205) fseek (ifp, 280, SEEK_CUR);
fread (buf97, 324, 1, ifp);
}
}
if (tag == 0xa1 && type == 7) {
order = 0x4949;
fseek (ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3) {
fseek (ifp, wbi*48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7 && (unsigned) (ver97-200) < 17) {
ci = xlat[0][serial & 0xff];
cj = xlat[1][fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp)];
ck = 0x60;
for (i=0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97-200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] =
sget2 (buf97 + (i & -2) + c*2);
}
if(tag == 0xb001 && type == 3)
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(),get4());
if (tag == 0x200 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4)
goto get2_rggb;
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
if (tag == 0x03d && strstr(make,"NIKON") && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0xe01) { /* Nikon Capture Note */
order = 0x4949;
fseek (ifp, 22, SEEK_CUR);
for (offset=22; offset+22 < len; offset += 22+i) {
tag = get4();
fseek (ifp, 14, SEEK_CUR);
i = get4()-4;
if (tag == 0x76a43207) flip = get2();
else fseek (ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7) {
fseek (ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7) {
if (len == 614)
fseek (ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek (ifp, 148, SEEK_CUR);
else goto next;
goto get2_256;
}
if ((tag == 0x1011 && len == 9) || tag == 0x20400200)
{
if(!strcasecmp(make,"Olympus"))
{
int j,k;
for (i=0; i < 3; i++)
FORC3 adobe_cam[i][c] = ((short) get2()) / 256.0;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_adobe[i][k] * adobe_cam[k][j];
}
else
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2) {
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && type == 4)
fseek (ifp, get4()+base, SEEK_SET);
if (tag == 0x2020)
parse_thumb_note (base, 257, 258);
if (tag == 0x2040)
parse_makernote (base, 0x2040);
if (tag == 0xb028) {
fseek (ifp, get4()+base, SEEK_SET);
parse_thumb_note (base, 136, 137);
}
if (tag == 0x4001 && len > 500) {
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek (ifp, i, SEEK_CUR);
get2_rggb:
FORC4 cam_mul[c ^ (c >> 1)] = get2();
i = len >> 3 == 164 ? 112:22;
fseek (ifp, i, SEEK_CUR);
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
}
if(!strcasecmp(make,"Samsung"))
{
if (tag == 0xa020) // get the full Samsung encryption key
for (i=0; i<11; i++) SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
if (tag == 0xa030 && len == 9) // get and decode Samsung color matrix
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = (short)((get4() + SamsungKey[i*3+c]))/256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Somebody else use 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
Since the TIFF DateTime string has no timezone information,
assume that the camera's clock was set to Universal Time.
*/
void CLASS get_timestamp (int reversed)
{
struct tm t;
char str[20];
int i;
str[19] = 0;
if (reversed)
for (i=19; i--; ) str[i] = fgetc(ifp);
else
fread (str, 19, 1, ifp);
memset (&t, 0, sizeof t);
if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
return;
t.tm_year -= 1900;
t.tm_mon -= 1;
t.tm_isdst = -1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
void CLASS parse_exif (int base)
{
unsigned kodak, entries, tag, type, len, save, c;
double expo;
kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 33434: shutter = getreal(type); break;
case 33437: aperture = getreal(type); break;
case 34855: iso_speed = get2(); break;
case 36867:
case 36868: get_timestamp(0); break;
case 37377: if ((expo = -getreal(type)) < 128)
shutter = pow (2.0, expo); break;
case 37378: aperture = pow (2.0, getreal(type)/2); break;
case 37386: focal_len = getreal(type); break;
case 37500: parse_makernote (base, 0); break;
case 40962: if (kodak) raw_width = get4(); break;
case 40963: if (kodak) raw_height = get4(); break;
case 41730:
if (get4() == 0x20002)
for (exif_cfa=c=0; c < 8; c+=2)
exif_cfa |= fgetc(ifp) * 0x01010101 << c;
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_gps (int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 1: case 3: case 5:
gpsdata[29+tag/2] = getc(ifp); break;
case 2: case 4: case 7:
FORC(6) gpsdata[tag/3*6+c] = get4(); break;
case 6:
FORC(2) gpsdata[18+c] = get4(); break;
case 18: case 29:
fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS romm_coeff (float romm_cam[3][3])
{
static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */
{ { 2.034193, -0.727420, -0.306766 },
{ -0.228811, 1.231729, -0.002922 },
{ -0.008565, -0.153273, 1.161839 } };
int i, j, k;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
}
void CLASS parse_mos (int offset)
{
char data[40];
int skip, from, i, c, neut[4], planes=0, frot=0;
static const char *mod[] =
{ "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
"Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
"Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
"","","","","","","","","","","","","","","","","","AFi-II 12" };
float romm_cam[3][3];
fseek (ifp, offset, SEEK_SET);
while (1) {
if (get4() != 0x504b5453) break;
get4();
fread (data, 1, 40, ifp);
skip = get4();
from = ftell(ifp);
if (!strcmp(data,"JPEG_preview_data")) {
thumb_offset = from;
thumb_length = skip;
}
if (!strcmp(data,"icc_camera_profile")) {
profile_offset = from;
profile_length = skip;
}
if (!strcmp(data,"ShootObj_back_type")) {
fscanf (ifp, "%d", &i);
if ((unsigned) i < sizeof mod / sizeof (*mod))
strcpy (model, mod[i]);
}
if (!strcmp(data,"icc_camera_to_tone_matrix")) {
for (i=0; i < 9; i++)
romm_cam[0][i] = int_to_float(get4());
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_color_matrix")) {
for (i=0; i < 9; i++)
fscanf (ifp, "%f", &romm_cam[0][i]);
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_number_of_planes"))
fscanf (ifp, "%d", &planes);
if (!strcmp(data,"CaptProf_raw_data_rotation"))
fscanf (ifp, "%d", &flip);
if (!strcmp(data,"CaptProf_mosaic_pattern"))
FORC4 {
fscanf (ifp, "%d", &i);
if (i == 1) frot = c ^ (c >> 1);
}
if (!strcmp(data,"ImgProf_rotation_angle")) {
fscanf (ifp, "%d", &i);
flip = i - flip;
}
if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
FORC4 fscanf (ifp, "%d", neut+c);
FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
}
if (!strcmp(data,"Rows_data"))
load_flags = get4();
parse_mos (from);
fseek (ifp, skip+from, SEEK_SET);
}
if (planes)
filters = (planes == 1) * 0x01010101 *
(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}
void CLASS linear_table (unsigned len)
{
int i;
if (len > 0x1000) len = 0x1000;
read_shorts (curve, len);
for (i=len; i < 0x1000; i++)
curve[i] = curve[i-1];
maximum = curve[0xfff];
}
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2, wbtemp=6500;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
entries = get2();
if (entries > 1024) return;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / get2();
wbi = -2;
}
if (tag == 2118) wbtemp = getint(type);
if (tag == 2130 + wbi)
FORC3 mul[c] = getreal(type);
if (tag == 2140 + wbi && wbi >= 0)
FORC3 {
for (num=i=0; i < 4; i++)
num += getreal(type) * pow (wbtemp/100.0, i);
cam_mul[c] = 2048 / (num * mul[c]);
}
if (tag == 2317) linear_table (len);
if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
int CLASS parse_tiff_ifd (int base)
{
unsigned entries, tag, type, len, plen=16, save;
int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
int blrr=1, blrc=1, dblack[] = { 0,0,0,0 };
char software[64], *cbuf, *cp;
uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256];
double cc[4][4], cm[4][3], cam_xyz[4][3], num;
double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 };
unsigned sony_curve[] = { 0,0,0,0,0,4095 };
unsigned *buf, sony_offset=0, sony_length=0, sony_key=0;
struct jhead jh;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *sfp;
#endif
if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
return 1;
ifd = tiff_nifds++;
for (j=0; j < 4; j++)
for (i=0; i < 4; i++)
cc[j][i] = i == j;
entries = get2();
if (entries > 512) return 1;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 5: width = get2(); break;
case 6: height = get2(); break;
case 7: width += get2(); break;
case 9: if ((i = get2())) filters = i; break;
case 17: case 18:
if (type == 3 && len == 1)
cam_mul[(tag-17)*2] = get2() / 256.0;
break;
case 23:
if (type == 3) iso_speed = get2();
break;
case 36: case 37: case 38:
cam_mul[tag-0x24] = get2();
break;
case 39:
if (len < 50 || cam_mul[0]) break;
fseek (ifp, 12, SEEK_CUR);
FORC3 cam_mul[c] = get2();
break;
case 46:
if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break;
thumb_offset = ftell(ifp) - 2;
thumb_length = len;
break;
case 61440: /* Fuji HS10 table */
parse_tiff_ifd (base);
break;
case 2: case 256: case 61441: /* ImageWidth */
tiff_ifd[ifd].t_width = getint(type);
break;
case 3: case 257: case 61442: /* ImageHeight */
tiff_ifd[ifd].t_height = getint(type);
break;
case 258: /* BitsPerSample */
case 61443:
tiff_ifd[ifd].samples = len & 7;
tiff_ifd[ifd].bps = getint(type);
break;
case 61446:
raw_height = 0;
if (tiff_ifd[ifd].bps > 12) break;
load_raw = &CLASS packed_load_raw;
load_flags = get4() ? 24:80;
break;
case 259: /* Compression */
tiff_ifd[ifd].comp = getint(type);
break;
case 262: /* PhotometricInterpretation */
tiff_ifd[ifd].phint = get2();
break;
case 270: /* ImageDescription */
fread (desc, 512, 1, ifp);
break;
case 271: /* Make */
fgets (make, 64, ifp);
break;
case 272: /* Model */
fgets (model, 64, ifp);
break;
case 280: /* Panasonic RW2 offset */
if (type != 4) break;
load_raw = &CLASS panasonic_load_raw;
load_flags = 0x2008;
case 273: /* StripOffset */
case 513: /* JpegIFOffset */
case 61447:
tiff_ifd[ifd].offset = get4()+base;
if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) {
fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
tiff_ifd[ifd].comp = 6;
tiff_ifd[ifd].t_width = jh.wide;
tiff_ifd[ifd].t_height = jh.high;
tiff_ifd[ifd].bps = jh.bits;
tiff_ifd[ifd].samples = jh.clrs;
if (!(jh.sraw || (jh.clrs & 1)))
tiff_ifd[ifd].t_width *= jh.clrs;
i = order;
parse_tiff (tiff_ifd[ifd].offset + 12);
order = i;
}
}
break;
case 274: /* Orientation */
tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0';
break;
case 277: /* SamplesPerPixel */
tiff_ifd[ifd].samples = getint(type) & 7;
break;
case 279: /* StripByteCounts */
case 514:
case 61448:
tiff_ifd[ifd].bytes = get4();
break;
case 61454:
FORC3 cam_mul[(4-c) % 3] = getint(type);
break;
case 305: case 11: /* Software */
fgets (software, 64, ifp);
if (!strncmp(software,"Adobe",5) ||
!strncmp(software,"dcraw",5) ||
!strncmp(software,"UFRaw",5) ||
!strncmp(software,"Bibble",6) ||
!strncmp(software,"Nikon Scan",10) ||
!strcmp (software,"Digital Photo Professional"))
is_raw = 0;
break;
case 306: /* DateTime */
get_timestamp(0);
break;
case 315: /* Artist */
fread (artist, 64, 1, ifp);
break;
case 322: /* TileWidth */
tiff_ifd[ifd].t_tile_width = getint(type);
break;
case 323: /* TileLength */
tiff_ifd[ifd].t_tile_length = getint(type);
break;
case 324: /* TileOffsets */
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
if (len == 4) {
load_raw = &CLASS sinar_4shot_load_raw;
is_raw = 5;
}
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 325: /* TileByteCount */
tiff_ifd[ifd].tile_maxbytes = 0;
for(int jj=0;jj<len;jj++)
{
int s = get4();
if(s > tiff_ifd[ifd].tile_maxbytes) tiff_ifd[ifd].tile_maxbytes=s;
}
break;
#endif
case 330: /* SubIFDs */
if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) {
load_raw = &CLASS sony_arw_load_raw;
data_offset = get4()+base;
ifd++; break;
}
if(len > 1000) len=1000; /* 1000 SubIFDs is enough */
while (len--) {
i = ftell(ifp);
fseek (ifp, get4()+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
fseek (ifp, i+4, SEEK_SET);
}
break;
case 400:
strcpy (make, "Sarnoff");
maximum = 0xfff;
break;
case 28688:
FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff;
for (i=0; i < 5; i++)
for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++)
curve[j] = curve[j-1] + (1 << i);
break;
case 29184: sony_offset = get4(); break;
case 29185: sony_length = get4(); break;
case 29217: sony_key = get4(); break;
case 29264:
parse_minolta (ftell(ifp));
raw_width = 0;
break;
case 29443:
FORC4 cam_mul[c ^ (c < 2)] = get2();
break;
case 29459:
FORC4 cam_mul[c] = get2();
i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
SWAP (cam_mul[i],cam_mul[i+1])
break;
case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = ((short) get2()) / 1024.0;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr, _(" Sony matrix:\n%f %f %f\n%f %f %f\n%f %f %f\n"), cmatrix[0][0], cmatrix[0][1], cmatrix[0][2], cmatrix[1][0], cmatrix[1][1], cmatrix[1][2], cmatrix[2][0], cmatrix[2][1], cmatrix[2][2]);
#endif
break;
case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, needs to be divided by 4
FORC4 cblack[c ^ c >> 1] = get2()/4;
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black = i;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]);
#endif
break;
case 33405: /* Model2 */
fgets (model2, 64, ifp);
break;
case 33422: /* CFAPattern */
case 64777: /* Kodak P-series */
if ((plen=len) > 16) plen = 16;
fread (cfa_pat, 1, plen, ifp);
for (colors=cfa=i=0; i < plen && colors < 4; i++) {
colors += !(cfa & (1 << cfa_pat[i]));
cfa |= 1 << cfa_pat[i];
}
if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */
if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */
goto guess_cfa_pc;
case 33424:
case 65024:
fseek (ifp, get4()+base, SEEK_SET);
parse_kodak_ifd (base);
break;
case 33434: /* ExposureTime */
shutter = getreal(type);
break;
case 33437: /* FNumber */
aperture = getreal(type);
break;
case 34306: /* Leaf white balance */
FORC4 cam_mul[c ^ 1] = 4096.0 / get2();
break;
case 34307: /* Leaf CatchLight color matrix */
fread (software, 1, 7, ifp);
if (strncmp(software,"MATRIX",6)) break;
colors = 4;
for (raw_color = i=0; i < 3; i++) {
FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]);
if (!use_camera_wb) continue;
num = 0;
FORC4 num += rgb_cam[i][c];
FORC4 rgb_cam[i][c] /= num;
}
break;
case 34310: /* Leaf metadata */
parse_mos (ftell(ifp));
case 34303:
strcpy (make, "Leaf");
break;
case 34665: /* EXIF tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_exif (base);
break;
case 34853: /* GPSInfo tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_gps (base);
break;
case 34675: /* InterColorProfile */
case 50831: /* AsShotICCProfile */
profile_offset = ftell(ifp);
profile_length = len;
break;
case 37122: /* CompressedBitsPerPixel */
kodak_cbpp = get4();
break;
case 37386: /* FocalLength */
focal_len = getreal(type);
break;
case 37393: /* ImageNumber */
shot_order = getint(type);
break;
case 37400: /* old Kodak KDC tag */
for (raw_color = i=0; i < 3; i++) {
getreal(type);
FORC3 rgb_cam[i][c] = getreal(type);
}
break;
case 40976:
strip_offset = get4();
load_raw = &CLASS samsung_load_raw;
break;
case 46275: /* Imacon tags */
strcpy (make, "Imacon");
data_offset = ftell(ifp);
ima_len = len;
break;
case 46279:
if (!ima_len) break;
fseek (ifp, 38, SEEK_CUR);
case 46274:
fseek (ifp, 40, SEEK_CUR);
raw_width = get4();
raw_height = get4();
left_margin = get4() & 7;
width = raw_width - left_margin - (get4() & 7);
top_margin = get4() & 7;
height = raw_height - top_margin - (get4() & 7);
if (raw_width == 7262 && ima_len == 234317952 ) {
height = 5412;
width = 7216;
left_margin = 7;
filters=0;
} else if (raw_width == 7262) {
height = 5444;
width = 7244;
left_margin = 7;
}
fseek (ifp, 52, SEEK_CUR);
FORC3 cam_mul[c] = getreal(11);
fseek (ifp, 114, SEEK_CUR);
flip = (get2() >> 7) * 90;
if (width * height * 6 == ima_len) {
if (flip % 180 == 90) SWAP(width,height);
raw_width = width;
raw_height = height;
left_margin = top_margin = filters = flip = 0;
}
sprintf (model, "Ixpress %d-Mp", height*width/1000000);
load_raw = &CLASS imacon_full_load_raw;
if (filters) {
if (left_margin & 1) filters = 0x61616161;
load_raw = &CLASS unpacked_load_raw;
}
maximum = 0xffff;
break;
case 50454: /* Sinar tag */
case 50455:
if (!(cbuf = (char *) malloc(len))) break;
fread (cbuf, 1, len, ifp);
for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n'))
if (!strncmp (++cp,"Neutral ",8))
sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2);
free (cbuf);
break;
case 50458:
if (!make[0]) strcpy (make, "Hasselblad");
break;
case 50459: /* Hasselblad tag */
i = order;
j = ftell(ifp);
c = tiff_nifds;
order = get2();
fseek (ifp, j+(get2(),get4()), SEEK_SET);
parse_tiff_ifd (j);
maximum = 0xffff;
tiff_nifds = c;
order = i;
break;
case 50706: /* DNGVersion */
FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
if (!make[0]) strcpy (make, "DNG");
is_raw = 1;
break;
case 50710: /* CFAPlaneColor */
if (len > 4) len = 4;
colors = len;
fread (cfa_pc, 1, colors, ifp);
guess_cfa_pc:
FORCC tab[cfa_pc[c]] = c;
cdesc[c] = 0;
for (i=16; i--; )
filters = filters << 2 | tab[cfa_pat[i % plen]];
filters -= !filters;
break;
case 50711: /* CFALayout */
if (get2() == 2) {
fuji_width = 1;
filters = 0x49494949;
}
break;
case 291:
case 50712: /* LinearizationTable */
linear_table (len);
break;
case 50713: /* BlackLevelRepeatDim */
blrr = get2();
blrc = get2();
break;
case 61450:
blrr = blrc = 2;
case 50714: /* BlackLevel */
black = getreal(type);
if ((unsigned)(filters+1) < 1000) break;
dblack[0] = black;
dblack[1] = (blrc == 2) ? getreal(type):dblack[0];
dblack[2] = (blrr == 2) ? getreal(type):dblack[0];
dblack[3] = (blrc == 2 && blrr == 2) ? getreal(type):dblack[1];
if (colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
FORC4 cblack[filters >> (c << 1) & 3] = dblack[c];
black = 0;
break;
case 50715: /* BlackLevelDeltaH */
case 50716: /* BlackLevelDeltaV */
for (num=i=0; i < len && i < 65536; i++)
num += getreal(type);
black += num/len + 0.5;
break;
case 50717: /* WhiteLevel */
maximum = getint(type);
break;
case 50718: /* DefaultScale */
pixel_aspect = getreal(type);
pixel_aspect /= getreal(type);
break;
case 50721: /* ColorMatrix1 */
case 50722: /* ColorMatrix2 */
FORCC for (j=0; j < 3; j++)
cm[c][j] = getreal(type);
use_cm = 1;
break;
case 50723: /* CameraCalibration1 */
case 50724: /* CameraCalibration2 */
for (i=0; i < colors; i++)
FORCC cc[i][c] = getreal(type);
break;
case 50727: /* AnalogBalance */
FORCC ab[c] = getreal(type);
break;
case 50728: /* AsShotNeutral */
FORCC asn[c] = getreal(type);
break;
case 50729: /* AsShotWhiteXY */
xyz[0] = getreal(type);
xyz[1] = getreal(type);
xyz[2] = 1 - xyz[0] - xyz[1];
FORC3 xyz[c] /= d65_white[c];
break;
case 50740: /* DNGPrivateData */
if (dng_version) break;
parse_minolta (j = get4()+base);
fseek (ifp, j, SEEK_SET);
parse_tiff_ifd (base);
break;
case 50752:
read_shorts (cr2_slice, 3);
break;
case 50829: /* ActiveArea */
top_margin = getint(type);
left_margin = getint(type);
height = getint(type) - top_margin;
width = getint(type) - left_margin;
break;
case 50830: /* MaskedAreas */
for (i=0; i < len && i < 32; i++)
mask[0][i] = getint(type);
black = 0;
break;
case 51009: /* OpcodeList2 */
meta_offset = ftell(ifp);
break;
case 64772: /* Kodak P-series */
if (len < 13) break;
fseek (ifp, 16, SEEK_CUR);
data_offset = get4();
fseek (ifp, 28, SEEK_CUR);
data_offset += get4();
load_raw = &CLASS packed_load_raw;
break;
case 65026:
if (type == 2) fgets (model2, 64, ifp);
}
fseek (ifp, save, SEEK_SET);
}
if (sony_length && (buf = (unsigned *) malloc(sony_length))) {
fseek (ifp, sony_offset, SEEK_SET);
fread (buf, sony_length, 1, ifp);
sony_decrypt (buf, sony_length/4, 1, sony_key);
#ifndef LIBRAW_LIBRARY_BUILD
sfp = ifp;
if ((ifp = tmpfile())) {
fwrite (buf, sony_length, 1, ifp);
fseek (ifp, 0, SEEK_SET);
parse_tiff_ifd (-sony_offset);
fclose (ifp);
}
ifp = sfp;
#else
if( !ifp->tempbuffer_open(buf,sony_length))
{
parse_tiff_ifd(-sony_offset);
ifp->tempbuffer_close();
}
#endif
free (buf);
}
for (i=0; i < colors; i++)
FORCC cc[i][c] *= ab[i];
if (use_cm) {
FORCC for (i=0; i < 3; i++)
for (cam_xyz[c][i]=j=0; j < colors; j++)
cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i];
cam_xyz_coeff (cam_xyz);
}
if (asn[0]) {
cam_mul[3] = 0;
FORCC cam_mul[c] = 1 / asn[c];
}
if (!use_cm)
FORCC pre_mul[c] /= cc[c][c];
return 0;
}
int CLASS parse_tiff (int base)
{
int doff;
fseek (ifp, base, SEEK_SET);
order = get2();
if (order != 0x4949 && order != 0x4d4d) return 0;
get2();
while ((doff = get4())) {
fseek (ifp, doff+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
}
return 1;
}
void CLASS apply_tiff()
{
int max_samp=0, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000)
{
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
}
for (i=0; i < tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
(unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 &&
tiff_ifd[i].t_width*tiff_ifd[i].t_height > raw_width*raw_height) {
raw_width = tiff_ifd[i].t_width;
raw_height = tiff_ifd[i].t_height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
tiff_flip = tiff_ifd[i].t_flip;
tiff_samples = tiff_ifd[i].samples;
tile_width = tiff_ifd[i].t_tile_width;
tile_length = tiff_ifd[i].t_tile_length;
#ifdef LIBRAW_LIBRARY_BUILD
data_size = tile_length < INT_MAX && tile_length>0 ? tiff_ifd[i].tile_maxbytes: tiff_ifd[i].bytes;
#endif
raw = i;
}
}
if (!tile_width ) tile_width = INT_MAX;
if (!tile_length) tile_length = INT_MAX;
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773: goto slr;
case 0: case 1:
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
load_flags = 24;
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
load_flags = 81;
tiff_bps = 12;
} slr:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: if (tiff_ifd[raw].phint == 2)
load_flags = 6;
load_raw = &CLASS packed_load_raw; break;
case 14: load_flags = 0;
case 16: load_raw = &CLASS unpacked_load_raw;
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*7 > raw_width*raw_height)
load_raw = &CLASS olympus_load_raw;
}
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 34713:
if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
load_raw = &CLASS packed_load_raw;
load_flags = 1;
} else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
order = 0x4d4d;
} else
load_raw = &CLASS nikon_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: case 34892: break;
default: is_raw = 0;
}
if (!dng_version)
if ( (tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
tiff_compress != 32769 && tiff_compress != 32770)
|| (tiff_bps == 8 && !strcasestr(make,"Kodak") &&
!strstr(model2,"DEBUG RAW")))
is_raw = 0;
for (i=0; i < tiff_nifds; i++)
if (i != raw && tiff_ifd[i].samples == max_samp &&
tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33 &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) >
thumb_width * thumb_height / (SQR(thumb_misc)+1)
&& tiff_ifd[i].comp != 34892) {
thumb_width = tiff_ifd[i].t_width;
thumb_height = tiff_ifd[i].t_height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps <= 8)
write_thumb = &CLASS ppm_thumb;
else if (!strcmp(make,"Imacon"))
write_thumb = &CLASS ppm16_thumb;
else
thumb_load_raw = &CLASS kodak_thumb_load_raw;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
void CLASS parse_minolta (int base)
{
int save, tag, len, offset, high=0, wide=0, i, c;
short sorder=order;
fseek (ifp, base, SEEK_SET);
if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
order = fgetc(ifp) * 0x101;
offset = base + get4() + 8;
while ((save=ftell(ifp)) < offset) {
for (tag=i=0; i < 4; i++)
tag = tag << 8 | fgetc(ifp);
len = get4();
switch (tag) {
case 0x505244: /* PRD */
fseek (ifp, 8, SEEK_CUR);
high = get2();
wide = get2();
break;
case 0x574247: /* WBG */
get4();
i = strcmp(model,"DiMAGE A200") ? 0:3;
FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
break;
case 0x545457: /* TTW */
parse_tiff (ftell(ifp));
data_offset = offset;
}
fseek (ifp, save+len+8, SEEK_SET);
}
raw_height = high;
raw_width = wide;
order = sorder;
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
at the same time. The raw file has no header, so try to
to open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
const char *file, *ext;
char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
if(ifp->wfname())
{
std::wstring rawfile(ifp->wfname());
rawfile.replace(rawfile.length()-3,3,L"JPG");
if(!ifp->subfile_open(rawfile.c_str()))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
if(!ifp->fname())
{
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
ext = strrchr (ifname, '.');
file = strrchr (ifname, '/');
if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
if (!file) file = ifname-1;
#else
if (!file) file = (char*)ifname-1;
#endif
file++;
if (!ext || strlen(ext) != 4 || ext-file != 8) return;
jname = (char *) malloc (strlen(ifname) + 1);
merror (jname, "parse_external_jpeg()");
strcpy (jname, ifname);
jfile = file - ifname + jname;
jext = ext - ifname + jname;
if (strcasecmp (ext, ".jpg")) {
strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
if (isdigit(*file)) {
memcpy (jfile, file+4, 4);
memcpy (jfile+4, file, 4);
}
} else
while (isdigit(*--jext)) {
if (*jext != '9') {
(*jext)++;
break;
}
*jext = '0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (strcmp (jname, ifname)) {
if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
fclose (ifp);
}
}
#else
if (strcmp (jname, ifname))
{
if(!ifp->subfile_open(jname))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
}
#endif
if (!timestamp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
}
free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
ifp = save;
#endif
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
static const ushort key[] = { 0x410, 0x45f3 };
int i, bpp, row, col, vbits=0;
unsigned long bitbuf=0;
if ((get2(),get4()) != 0x80008 || !get4()) return;
bpp = get2();
if (bpp != 10 && bpp != 12) return;
for (i=row=0; row < 8; row++)
for (col=0; col < 8; col++) {
if (vbits < bpp) {
bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
vbits += 16;
}
white[row][col] =
bitbuf << (LONG_BIT - vbits) >> (LONG_BIT - bpp);
vbits -= bpp;
}
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff (int offset, int length, int depth)
{
int tboff, nrecs, c, type, len, save, wbi=-1;
ushort key[] = { 0x410, 0x45f3 };
fseek (ifp, offset+length-4, SEEK_SET);
tboff = get4() + offset;
fseek (ifp, tboff, SEEK_SET);
nrecs = get2();
if ((nrecs | depth) > 127) return;
while (nrecs--) {
type = get2();
len = get4();
save = ftell(ifp) + 4;
fseek (ifp, offset+get4(), SEEK_SET);
if ((((type >> 8) + 8) | 8) == 0x38)
parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */
if (type == 0x0810)
fread (artist, 64, 1, ifp);
if (type == 0x080a) {
fread (make, 64, 1, ifp);
fseek (ifp, strlen(make) - 63, SEEK_CUR);
fread (model, 64, 1, ifp);
}
if (type == 0x1810) {
width = get4();
height = get4();
pixel_aspect = int_to_float(get4());
flip = get4();
}
if (type == 0x1835) /* Get the decoder table */
tiff_compress = get4();
if (type == 0x2007) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (type == 0x1818) {
shutter = pow (2.0f, -int_to_float((get4(),get4())));
aperture = pow (2.0f, int_to_float(get4())/2);
}
if (type == 0x102a) {
iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
aperture = pow (2.0, (get2(),(short)get2())/64.0);
shutter = pow (2.0,-((short)get2())/32.0);
wbi = (get2(),get2());
if (wbi > 17) wbi = 0;
fseek (ifp, 32, SEEK_CUR);
if (shutter > 1e6) shutter = get2()/10.0;
}
if (type == 0x102c) {
if (get2() > 512) { /* Pro90, G1 */
fseek (ifp, 118, SEEK_CUR);
FORC4 cam_mul[c ^ 2] = get2();
} else { /* G2, S30, S40 */
fseek (ifp, 98, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
}
}
if (type == 0x0032) {
if (len == 768) { /* EOS D30 */
fseek (ifp, 72, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = 1024.0 / get2();
if (!wbi) cam_mul[0] = -1; /* use my auto white balance */
} else if (!cam_mul[0]) {
if (get2() == key[0]) /* Pro1, G6, S60, S70 */
c = (strstr(model,"Pro1") ?
"012346000000000000":"01345:000000006008")[wbi]-'0'+ 2;
else { /* G3, G5, S45, S50 */
c = "023457000000006000"[wbi]-'0';
key[0] = key[1] = 0;
}
fseek (ifp, 78 + c*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
if (!wbi) cam_mul[0] = -1;
}
}
if (type == 0x10a9) { /* D60, 10D, 300D, and clones */
if (len > 66) wbi = "0134567028"[wbi]-'0';
fseek (ifp, 2 + wbi*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
}
if (type == 0x1030 && (0x18040 >> wbi & 1))
ciff_block_1030(); /* all that don't have 0x10a9 */
if (type == 0x1031) {
raw_width = (get2(),get2());
raw_height = get2();
}
if (type == 0x5029) {
focal_len = len >> 16;
if ((len & 0xffff) == 2) focal_len /= 32;
}
if (type == 0x5813) flash_used = int_to_float(len);
if (type == 0x5814) canon_ev = int_to_float(len);
if (type == 0x5817) shot_order = len;
if (type == 0x5834) unique_id = len;
if (type == 0x580e) timestamp = len;
if (type == 0x180e) timestamp = get4();
#ifdef LOCALTIME
if ((type | 0x4000) == 0x580e)
timestamp = mktime (gmtime (×tamp));
#endif
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_rollei()
{
char line[128], *val;
struct tm t;
fseek (ifp, 0, SEEK_SET);
memset (&t, 0, sizeof t);
do {
fgets (line, 128, ifp);
if ((val = strchr(line,'=')))
*val++ = 0;
else
val = line + strlen(line);
if (!strcmp(line,"DAT"))
sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
if (!strcmp(line,"TIM"))
sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
if (!strcmp(line,"HDR"))
thumb_offset = atoi(val);
if (!strcmp(line,"X "))
raw_width = atoi(val);
if (!strcmp(line,"Y "))
raw_height = atoi(val);
if (!strcmp(line,"TX "))
thumb_width = atoi(val);
if (!strcmp(line,"TY "))
thumb_height = atoi(val);
} while (strncmp(line,"EOHD",4));
data_offset = thumb_offset + thumb_width * thumb_height * 2;
t.tm_year -= 1900;
t.tm_mon -= 1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
strcpy (make, "Rollei");
strcpy (model,"d530flex");
write_thumb = &CLASS rollei_thumb;
}
void CLASS parse_sinar_ia()
{
int entries, off;
char str[8], *cp;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
entries = get4();
fseek (ifp, get4(), SEEK_SET);
while (entries--) {
off = get4(); get4();
fread (str, 8, 1, ifp);
if (!strcmp(str,"META")) meta_offset = off;
if (!strcmp(str,"THUMB")) thumb_offset = off;
if (!strcmp(str,"RAW0")) data_offset = off;
}
fseek (ifp, meta_offset+20, SEEK_SET);
fread (make, 64, 1, ifp);
make[63] = 0;
if ((cp = strchr(make,' '))) {
strcpy (model, cp+1);
*cp = 0;
}
raw_width = get2();
raw_height = get2();
load_raw = &CLASS unpacked_load_raw;
thumb_width = (get4(),get2());
thumb_height = get2();
write_thumb = &CLASS ppm_thumb;
maximum = 0x3fff;
}
void CLASS parse_phase_one (int base)
{
unsigned entries, tag, type, len, data, save, i, c;
float romm_cam[3][3];
char *cp;
memset (&ph1, 0, sizeof ph1);
fseek (ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177) return; /* "Raw" */
fseek (ifp, get4()+base, SEEK_SET);
entries = get4();
get4();
while (entries--) {
tag = get4();
type = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, base+data, SEEK_SET);
switch (tag) {
case 0x100: flip = "0653"[data & 3]-'0'; break;
case 0x106:
for (i=0; i < 9; i++)
romm_cam[0][i] = getreal(11);
romm_coeff (romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108: raw_width = data; break;
case 0x109: raw_height = data; break;
case 0x10a: left_margin = data; break;
case 0x10b: top_margin = data; break;
case 0x10c: width = data; break;
case 0x10d: height = data; break;
case 0x10e: ph1.format = data; break;
case 0x10f: data_offset = data+base; break;
case 0x110: meta_offset = data+base;
meta_length = len; break;
case 0x112: ph1.key_off = save - 4; break;
case 0x210: ph1.tag_210 = int_to_float(data); break;
case 0x21a: ph1.tag_21a = data; break;
case 0x21c: strip_offset = data+base; break;
case 0x21d: ph1.t_black = data; break;
case 0x222: ph1.split_col = data; break;
case 0x223: ph1.black_off = data+base; break;
case 0x301:
model[63] = 0;
fread (model, 1, 63, ifp);
if ((cp = strstr(model," camera"))) *cp = 0;
}
fseek (ifp, save, SEEK_SET);
}
load_raw = ph1.format < 3 ?
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy (make, "Phase One");
if (model[0]) return;
switch (raw_height) {
case 2060: strcpy (model,"LightPhase"); break;
case 2682: strcpy (model,"H 10"); break;
case 4128: strcpy (model,"H 20"); break;
case 5488: strcpy (model,"H 25"); break;
}
}
void CLASS parse_fuji (int offset)
{
unsigned entries, tag, len, save, c;
fseek (ifp, offset, SEEK_SET);
entries = get4();
if (entries > 255) return;
while (entries--) {
tag = get2();
len = get2();
save = ftell(ifp);
if (tag == 0x100) {
raw_height = get2();
raw_width = get2();
} else if (tag == 0x121) {
height = get2();
if ((width = get2()) == 4284) width += 3;
} else if (tag == 0x130) {
fuji_layout = fgetc(ifp) >> 7;
fuji_width = !(fgetc(ifp) & 8);
} else if (tag == 0x131) {
filters = 9;
FORC(36) xtrans[0][35-c] = fgetc(ifp) & 3;
} else if (tag == 0x2ff0) {
FORC4 cam_mul[c ^ 1] = get2();
} else if (tag == 0xc000) {
c = order;
order = 0x4949;
if ((tag = get4()) > 10000) tag = get4();
width = tag;
height = get4();
order = c;
}
fseek (ifp, save+len, SEEK_SET);
}
height <<= fuji_layout;
width >>= fuji_layout;
}
int CLASS parse_jpeg (int offset)
{
int len, save, hlen, mark;
fseek (ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3) {
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150) /* "HEAP" */
parse_ciff (save+hlen, len-hlen, 0);
if (parse_tiff (save+6)) apply_tiff();
fseek (ifp, save+len, SEEK_SET);
}
return 1;
}
void CLASS parse_riff()
{
unsigned i, size, end;
char tag[4], date[64], month[64];
static const char mon[12][4] =
{ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
struct tm t;
order = 0x4949;
fread (tag, 4, 1, ifp);
size = get4();
#ifdef LIBRAW_LIBRARY_BUILD
if((int)size<0)
throw LIBRAW_EXCEPTION_IO_EOF;
#endif
end = ftell(ifp) + size;
if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
get4();
while (ftell(ifp)+7 < end)
parse_riff();
} else if (!memcmp(tag,"nctg",4)) {
while (ftell(ifp)+7 < end) {
i = get2();
size = get2();
if ((i+1) >> 1 == 10 && size == 20)
get_timestamp(0);
else fseek (ifp, size, SEEK_CUR);
}
} else if (!memcmp(tag,"IDIT",4) && size < 64) {
fread (date, 64, 1, ifp);
date[size] = 0;
memset (&t, 0, sizeof t);
if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
t.tm_mon = i;
t.tm_year -= 1900;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
} else
fseek (ifp, size, SEEK_CUR);
}
void CLASS parse_smal (int offset, int fsize)
{
int ver;
fseek (ifp, offset+2, SEEK_SET);
order = 0x4949;
ver = fgetc(ifp);
if (ver == 6)
fseek (ifp, 5, SEEK_CUR);
if (get4() != fsize) return;
if (ver > 6) data_offset = get4();
raw_height = height = get2();
raw_width = width = get2();
strcpy (make, "SMaL");
sprintf (model, "v%d %dx%d", ver, width, height);
if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}
void CLASS parse_cine()
{
unsigned off_head, off_setup, off_image, i;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
is_raw = get2() == 2;
fseek (ifp, 14, SEEK_CUR);
is_raw *= get4();
off_head = get4();
off_setup = get4();
off_image = get4();
timestamp = get4();
if ((i = get4())) timestamp = i;
fseek (ifp, off_head+4, SEEK_SET);
raw_width = get4();
raw_height = get4();
switch (get2(),get2()) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 16: load_raw = &CLASS unpacked_load_raw;
}
fseek (ifp, off_setup+792, SEEK_SET);
strcpy (make, "CINE");
sprintf (model, "%d", get4());
fseek (ifp, 12, SEEK_CUR);
switch ((i=get4()) & 0xffffff) {
case 3: filters = 0x94949494; break;
case 4: filters = 0x49494949; break;
default: is_raw = 0;
}
fseek (ifp, 72, SEEK_CUR);
switch ((get4()+3600) % 360) {
case 270: flip = 4; break;
case 180: flip = 1; break;
case 90: flip = 7; break;
case 0: flip = 2;
}
cam_mul[0] = getreal(11);
cam_mul[2] = getreal(11);
maximum = ~((~0u) << get4());
fseek (ifp, 668, SEEK_CUR);
shutter = get4()/1000000000.0;
fseek (ifp, off_image, SEEK_SET);
if (shot_select < is_raw)
fseek (ifp, shot_select*8, SEEK_CUR);
data_offset = (INT64) get4() + 8;
data_offset += (INT64) get4() << 32;
}
void CLASS parse_redcine()
{
unsigned i, len, rdvo;
order = 0x4d4d;
is_raw = 0;
fseek (ifp, 52, SEEK_SET);
width = get4();
height = get4();
fseek (ifp, 0, SEEK_END);
fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
if (get4() != i || get4() != 0x52454f42) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
fseek (ifp, 0, SEEK_SET);
while ((len = get4()) != EOF) {
if (get4() == 0x52454456)
if (is_raw++ == shot_select)
data_offset = ftello(ifp) - 8;
fseek (ifp, len-8, SEEK_CUR);
}
} else {
rdvo = get4();
fseek (ifp, 12, SEEK_CUR);
is_raw = get4();
fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
data_offset = get4();
}
}
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
void CLASS adobe_coeff (const char *t_make, const char *t_model)
{
static const struct {
const char *prefix;
short t_black, t_maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0,
{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
{ "Canon EOS D60", 0, 0xfa0,
{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D", 0, 0x3c82,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 10D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3c80,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 300D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20,
{ 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0,
{ 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0,
{ -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } },
{ "Canon PowerShot G2", 0, 0,
{ 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } },
{ "Canon PowerShot G3", 0, 0,
{ 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } },
{ "Canon PowerShot G5", 0, 0,
{ 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0,
{ -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } },
{ "Canon PowerShot Pro90", 0, 0,
{ -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } },
{ "Canon PowerShot S30", 0, 0,
{ 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } },
{ "Canon PowerShot S40", 0, 0,
{ 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } },
{ "Canon PowerShot S45", 0, 0,
{ 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } },
{ "Canon PowerShot S50", 0, 0,
{ 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S120", 0, 0, /* LibRaw */
{ 10800,-4782,-628,-2057,10783,1176,-802,2091,4739 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0,
{ 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S200EXR", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S2Pro", 128, 0,
{ 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } },
{ "Fujifilm S3Pro", 0, 0,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS20EXR", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-A1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2", 0, 0,
{ 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-M1", 0, 0,
{ 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm XQ1", 0, 0,
{ 14305,-7365,-687,-3117,12383,432,-287,1660,4361 } },
{ "Hasselblad Lunar", 128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Hasselblad Stellar", 200, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", 8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", 8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", 178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", 177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", 177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", 176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", 173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf CMost", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d,
{ 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d,
{ 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } },
{ "Minolta DiMAGE 7", 0, 0xf7d,
{ 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } },
{ "Minolta DiMAGE A1", 0, 0xf8b,
{ 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0,
{ 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } },
{ "Nikon D2X", 0, 0,
{ 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4", 0, 0,
{ 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{"Nikon D5300",0, 0,
{ 10645,-5086,-698,-4938,13608,761,-1107,1874,5312 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{"Nikon D610",0, 0,
{ 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{"Nikon Df",0, 0,
{ 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5400", 0, 0,
{ 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } },
{ "Nikon E5700", 0, 0,
{ -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX P330", 0, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", 200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", 200, 0,
{ 13443,-6418,-673,-1309,10025,1131,-462,1827,4782 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 AW1", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 ", 0, 0,
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus C5050", 0, 0,
{ 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc,
{ 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc,
{ 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "OLYMPUS E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{"Olympus E-M1", 0, 0,
{ 11663,-5527,-419,-1683,9915,1389,-582,1933,5016 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{"Olympus STYLUS1",0, 0,
{ 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision ov5647", 0, 0, /* DJC */
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } },
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax K10D", 0, 0,
{ 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-m", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax MX-1", 0, 0,
{ 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } },
{ "Pentax Q10", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", 15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", 143, 0,
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", 143, 0,
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", 15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LF1", 143, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C", 143, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", 15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", 15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", 143, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", 143, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", 143, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", 143, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-FZ100", 143, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", 143, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", 143, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", 143, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ200", 143, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", 143, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", 15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", 15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", 15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", 143, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", 143, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", 143, 0xfff, /* DJC */
{ 6395,-2583,-40,-3677,9109,4569,-1502,2806,6431 } },
{ "Panasonic DMC-GF1", 15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", 143, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", 143, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", 143, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", 143, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GH1", 15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", 15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", 144, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GM1", 143, 0,
{ 8977,-3976,-425,-3050,11095,1117,-1217,2563,4750 } },
{ "Panasonic DMC-GX1", 143, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{"Panasonic DMC-GX7",143,0,
{7541,-2355,-591,-3163,10598,1894,-933,2109,5006}},
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 30", 0, 0,
{ 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } },
{ "Phase One P 45", 0, 0,
{ 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung NX300", 0, 0,
{ 8873,-3984,-372,-3759,12305,1013,-994,1981,4788 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX", 0, 0, /* NX5, NX10, NX11, NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX-1", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
// Foveon: LibRaw color data
{ "Sigma SD9", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ "Sigma SD10", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ "Sigma SD14", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ "Sigma SD15", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
// Merills + SD1
{ "Sigma SD1", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP1 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP2 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP3 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
// Sigma DP (non-Merill Versions)
{ "Sigma DP", 0, 4095, /* LibRaw */
// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", -512, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{ "Sony DSC-RX100M2", -200, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Sony DSC-RX100", -200, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{"Sony DSC-RX10",0, 0,
{ 8562,-3595,-385,-2715,11089,1128,-1023,2081,4400 } },
{ "Sony DSC-RX1R", -128, 0,
{ 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } },
{ "Sony DSC-RX1", -128, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", -128, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", -128, 0xfeb,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A5", -128, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", -128, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", -128, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", -128, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{"Sony ILCE-3000",-128, 0,
{ 14009,-8208,729,3738,4752,2932,5743,-3800,6494 } },
{"Sony ILCE-A7R",-128, 0,
{ 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } },
{"Sony ILCE-A7",-128, 0,
{ 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } },
{ "Sony NEX-5T", -128, 0,
{ 7623,-2693,-347,-4060,11875,1928,-1363,2329,5752 } },
{ "Sony NEX-5N", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", -128, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", -128, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", -128, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", -128, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", -128, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", -128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX", -128, 0, /* NEX-C3, NEX-F3 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", -128, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", -128, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", -128, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", -128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", -128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", -128, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
};
double cam_xyz[4][3];
char name[130];
int i, j;
sprintf (name, "%s %s", t_make, t_model);
for (i=0; i < sizeof table / sizeof *table; i++)
if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) {
if (table[i].t_black>0) black = (ushort) table[i].t_black;
else if(table[i].t_black <0 && black == 0 ) black = (ushort) (-table[i].t_black);
if (table[i].t_maximum) maximum = (ushort) table[i].t_maximum;
if (table[i].trans[0]) {
for (j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.cam_xyz[0][j] =
#endif
cam_xyz[0][j] = table[i].trans[j] / 10000.0;
cam_xyz_coeff (cam_xyz);
}
break;
}
}
void CLASS simple_coeff (int index)
{
static const float table[][12] = {
/* index 0 -- all Foveon cameras */
{ 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
/* index 1 -- Kodak DC20 and DC25 */
{ 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
/* index 2 -- Logitech Fotoman Pixtura */
{ 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
/* index 3 -- Nikon E880, E900, and E990 */
{ -1.936280, 1.800443, -1.448486, 2.584324,
1.405365, -0.524955, -0.289090, 0.408680,
-1.204965, 1.082304, 2.941367, -1.818705 }
};
int i, c;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[index][i*colors+c];
}
short CLASS guess_byte_order (int words)
{
uchar test[4][2];
int t=2, msb;
double diff, sum[2] = {0,0};
fread (test[0], 2, 2, ifp);
for (words-=2; words--; ) {
fread (test[t], 2, 1, ifp);
for (msb=0; msb < 2; msb++) {
diff = (test[t^2][msb] << 8 | test[t^2][!msb])
- (test[t ][msb] << 8 | test[t ][!msb]);
sum[msb] += diff*diff;
}
t = (t+1) & 3;
}
return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}
float CLASS find_green (int bps, int bite, int off0, int off1)
{
UINT64 bitbuf=0;
int vbits, col, i, c;
ushort img[2][2064];
double sum[]={0,0};
FORC(2) {
fseek (ifp, c ? off1:off0, SEEK_SET);
for (vbits=col=0; col < width; col++) {
for (vbits -= bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
}
}
FORC(width-1) {
sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
}
return 100 * log(sum[0]/sum[1]);
}
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
static const short pana[][6] = {
{ 3130, 1743, 4, 0, -6, 0 },
{ 3130, 2055, 4, 0, -6, 0 },
{ 3130, 2319, 4, 0, -6, 0 },
{ 3170, 2103, 18, 0,-42, 20 },
{ 3170, 2367, 18, 13,-42,-21 },
{ 3177, 2367, 0, 0, -1, 0 },
{ 3304, 2458, 0, 0, -1, 0 },
{ 3330, 2463, 9, 0, -5, 0 },
{ 3330, 2479, 9, 0,-17, 4 },
{ 3370, 1899, 15, 0,-44, 20 },
{ 3370, 2235, 15, 0,-44, 20 },
{ 3370, 2511, 15, 10,-44,-21 },
{ 3690, 2751, 3, 0, -8, -3 },
{ 3710, 2751, 0, 0, -3, 0 },
{ 3724, 2450, 0, 0, 0, -2 },
{ 3770, 2487, 17, 0,-44, 19 },
{ 3770, 2799, 17, 15,-44,-19 },
{ 3880, 2170, 6, 0, -6, 0 },
{ 4060, 3018, 0, 0, 0, -2 },
{ 4290, 2391, 3, 0, -8, -1 },
{ 4330, 2439, 17, 15,-44,-19 },
{ 4508, 2962, 0, 0, -3, -4 },
{ 4508, 3330, 0, 0, -3, -6 },
};
static const ushort canon[][6] = {
{ 1944, 1416, 0, 0, 48, 0 },
{ 2144, 1560, 4, 8, 52, 2 },
{ 2224, 1456, 48, 6, 0, 2 },
{ 2376, 1728, 12, 6, 52, 2 },
{ 2672, 1968, 12, 6, 44, 2 },
{ 3152, 2068, 64, 12, 0, 0 },
{ 3160, 2344, 44, 12, 4, 4 },
{ 3344, 2484, 4, 6, 52, 6 },
{ 3516, 2328, 42, 14, 0, 0 },
{ 3596, 2360, 74, 12, 0, 0 },
{ 3744, 2784, 52, 12, 8, 12 },
{ 3944, 2622, 30, 18, 6, 2 },
{ 3948, 2622, 42, 18, 0, 2 },
{ 3984, 2622, 76, 20, 0, 2 },
{ 4104, 3048, 48, 12, 24, 12 },
{ 4116, 2178, 4, 2, 0, 0 },
{ 4152, 2772, 192, 12, 0, 0 },
{ 4160, 3124, 104, 11, 8, 65 },
{ 4176, 3062, 96, 17, 8, 0 },
{ 4312, 2876, 22, 18, 0, 2 },
{ 4352, 2874, 62, 18, 0, 0 },
{ 4476, 2954, 90, 34, 0, 0 },
{ 4480, 3348, 12, 10, 36, 12 },
{ 4496, 3366, 80, 50, 12, 0 },
{ 4832, 3204, 62, 26, 0, 0 },
{ 4832, 3228, 62, 51, 0, 0 },
{ 5108, 3349, 98, 13, 0, 0 },
{ 5120, 3318, 142, 45, 62, 0 },
{ 5280, 3528, 72, 52, 0, 0 },
{ 5344, 3516, 142, 51, 0, 0 },
{ 5344, 3584, 126,100, 0, 2 },
{ 5360, 3516, 158, 51, 0, 0 },
{ 5568, 3708, 72, 38, 0, 0 },
{ 5712, 3774, 62, 20, 10, 2 },
{ 5792, 3804, 158, 51, 0, 0 },
{ 5920, 3950, 122, 80, 2, 0 },
};
static const struct {
ushort id;
char t_model[20];
} unique[] = {
{ 0x168, "EOS 10D" }, { 0x001, "EOS-1D" },
{ 0x175, "EOS 20D" }, { 0x174, "EOS-1D Mark II" },
{ 0x234, "EOS 30D" }, { 0x232, "EOS-1D Mark II N" },
{ 0x190, "EOS 40D" }, { 0x169, "EOS-1D Mark III" },
{ 0x261, "EOS 50D" }, { 0x281, "EOS-1D Mark IV" },
{ 0x287, "EOS 60D" }, { 0x167, "EOS-1DS" },
{ 0x170, "EOS 300D" }, { 0x188, "EOS-1Ds Mark II" },
{ 0x176, "EOS 450D" }, { 0x215, "EOS-1Ds Mark III" },
{ 0x189, "EOS 350D" }, { 0x324, "EOS-1D C" },
{ 0x236, "EOS 400D" }, { 0x269, "EOS-1D X" },
{ 0x252, "EOS 500D" }, { 0x213, "EOS 5D" },
{ 0x270, "EOS 550D" }, { 0x218, "EOS 5D Mark II" },
{ 0x286, "EOS 600D" }, { 0x285, "EOS 5D Mark III" },
{ 0x301, "EOS 650D" }, { 0x302, "EOS 6D" },
{ 0x325, "EOS 70D" }, { 0x326, "EOS 700D" }, { 0x250, "EOS 7D" },
{ 0x254, "EOS 1000D" },
{ 0x288, "EOS 1100D" },
{ 0x346, "EOS 100D" },
{ 0x331, "EOS M" },
};
static const struct {
ushort id;
char t_model[20];
} sony_unique[] = {
{2,"DSC-R1"},
{256,"DSLR-A100"},
{257,"DSLR-A900"},
{258,"DSLR-A700"},
{259,"DSLR-A200"},
{260,"DSLR-A350"},
{261,"DSLR-A300"},
{262,"DSLR-A900"},
{263,"DSLR-A380"},
{264,"DSLR-A330"},
{265,"DSLR-A230"},
{266,"DSLR-A290"},
{269,"DSLR-A850"},
{270,"DSLR-A850"},
{273,"DSLR-A550"},
{274,"DSLR-A500"},
{275,"DSLR-A450"},
{278,"NEX-5"},
{279,"NEX-3"},
{280,"SLT-A33"},
{281,"SLT-A55"},
{282,"DSLR-A560"},
{283,"DSLR-A580"},
{284,"NEX-C3"},
{285,"SLT-A35"},
{286,"SLT-A65"},
{287,"SLT-A77"},
{288,"NEX-5N"},
{289,"NEX-7"},
{290,"NEX-VG20E"},
{291,"SLT-A37"},
{292,"SLT-A57"},
{293,"NEX-F3"},
{294,"SLT-A99"},
{295,"NEX-6"},
{296,"NEX-5R"},
{297,"DSC-RX100"},
{298,"DSC-RX1"},
{299,"NEX-VG900"},
{300,"NEX-VG30E"},
{302,"ILCE-3000"},
{303,"SLT-A58"},
{305,"NEX-3N"},
{306,"ILCE-A7"},
{307,"NEX-5T"},
{308,"DSC-RX100M2"},
{310,"DSC-RX1R"},
{311,"ILCE-A7R"},
};
static const struct {
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char t_make[10], t_model[20];
ushort offset;
} table[] = {
{ 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" },
{ 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" },
{ 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" },
{ 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" },
{ 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 },
{ 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" },
{ 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 },
{ 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" },
{ 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" },
{ 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 },
{ 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" },
{ 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" },
{ 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" },
{ 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" },
{ 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" },
{ 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" },
{ 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" },
{ 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" },
{ 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" },
{ 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" },
{ 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" },
{ 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" },
{ 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" },
{ 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" },
{ 19131120,4168,3060,92,16, 4, 1, 8,0x94,0,2,"Canon","PowerShot SX220 HS" },
{ 21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" },
{ 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" },
{ 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" },
{ 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" },
{ 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" },
{ 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" },
{ 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" },
{ 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" },
{ 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" },
{ 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" },
{ 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" },
{ 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" },
{ 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" },
{ 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" },
{ 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" },
{ 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" },
{ 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" },
{ 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" },
{ 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" },
{ 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" },
{ 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" },
{ 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" },
{ 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" },
{ 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" },
{ 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" },
{ 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic","640x480" },
{ 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" },
{ 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" },
{ 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 },
{ 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" },
{ 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 },
{ 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" },
{ 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 },
{ 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 },
{ 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" },
{ 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" },
{ 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" },
{ 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" },
{ 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" },
{ 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" },
{ 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" },
{ 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" },
{ 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" },
{ 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" },
{ 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" },
{ 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" },
{ 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" },
{ 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" },
{ 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" },
{ 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" },
{ 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
{ 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
{ 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
{ 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","3072x2048",68 },
{ 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x4080",68 },
{ 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x5440",68 },
{ 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
{ 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
};
static const char *corp[] =
{ "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm",
"Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica",
"Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh",
"Samsung", "Sigma", "Sinar", "Sony" };
char head[32], *cp;
int hlen, flen, fsize, zero_fsize=1, i, c;
struct jhead jh;
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset (tiff_ifd, 0, sizeof tiff_ifd);
memset (gpsdata, 0, sizeof gpsdata);
memset (cblack, 0, sizeof cblack);
memset (white, 0, sizeof white);
memset (mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i=0; i < 4; i++) {
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i=0; i < 0x10000; i++) curve[i] = i;
order = get2();
hlen = get4();
fseek (ifp, 0, SEEK_SET);
fread (head, 1, 32, ifp);
fseek (ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) ||
(cp = (char *) memmem (head, 32, (char*)"IIII", 4))) {
parse_phase_one (cp-head);
if (cp-head && parse_tiff(0)) apply_tiff();
} else if (order == 0x4949 || order == 0x4d4d) {
if (!memcmp (head+6,"HEAPCCDR",8)) {
data_offset = hlen;
parse_ciff (hlen, flen-hlen, 0);
load_raw = &CLASS canon_load_raw;
} else if (parse_tiff(0)) apply_tiff();
} else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
!memcmp (head+6,"Exif",4)) {
fseek (ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek (ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
} else if (!memcmp (head+25,"ARECOYK",7)) {
strcpy (make, "Contax");
strcpy (model,"N Digital");
fseek (ifp, 33, SEEK_SET);
get_timestamp(1);
fseek (ifp, 60, SEEK_SET);
FORC4 cam_mul[c ^ (c >> 1)] = get4();
} else if (!strcmp (head, "PXN")) {
strcpy (make, "Logitech");
strcpy (model,"Fotoman Pixtura");
} else if (!strcmp (head, "qktk")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
} else if (!strcmp (head, "qktn")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
} else if (!memcmp (head,"FUJIFILM",8)) {
fseek (ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek (ifp, 92, SEEK_SET);
parse_fuji (get4());
if (thumb_offset > 120) {
fseek (ifp, 120, SEEK_SET);
is_raw += (i = get4()) && 1;
if (is_raw == 2 && shot_select)
parse_fuji (i);
}
load_raw = &CLASS unpacked_load_raw;
fseek (ifp, 100+28*(shot_select > 0), SEEK_SET);
parse_tiff (data_offset = get4());
parse_tiff (thumb_offset+12);
apply_tiff();
} else if (!memcmp (head,"RIFF",4)) {
fseek (ifp, 0, SEEK_SET);
parse_riff();
} else if (!memcmp (head,"\0\001\0\001\0@",6)) {
fseek (ifp, 6, SEEK_SET);
fread (make, 1, 8, ifp);
fread (model, 1, 8, ifp);
fread (model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"NOKIARAW",8)) {
strcpy (make, "NOKIA");
strcpy (model, "X2");
order = 0x4949;
fseek (ifp, 300, SEEK_SET);
data_offset = get4();
i = get4();
width = get2();
height = get2();
data_offset += i - width * 5 / 4 * height;
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"ARRI",4)) {
order = 0x4949;
fseek (ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy (make, "ARRI");
fseek (ifp, 668, SEEK_SET);
fread (model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
} else if (!memcmp (head,"XPDS",4)) {
order = 0x4949;
fseek (ifp, 0x800, SEEK_SET);
fread (make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek (ifp, 56, SEEK_CUR);
fread (model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
} else if (!memcmp (head+4,"RED1",4)) {
strcpy (make, "Red");
strcpy (model,"One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
filters = 0x49494949;
} else if (!memcmp (head,"DSC-Image",9))
parse_rollei();
else if (!memcmp (head,"PWAD",4))
parse_sinar_ia();
else if (!memcmp (head,"\0MRM",4))
parse_minolta(0);
else if (!memcmp (head,"FOVb",4))
{
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!imgdata.params.force_foveon_x3f)
parse_foveon();
else
#endif
parse_x3f();
#else
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
parse_foveon();
#endif
#endif
}
else if (!memcmp (head,"CI",2))
parse_cine();
else
for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++)
if (fsize == table[i].fsize) {
strcpy (make, table[i].t_make );
strcpy (model, table[i].t_model);
flip = table[i].flags >> 2;
zero_is_bad = table[i].flags & 2;
if (table[i].flags & 1)
parse_external_jpeg();
data_offset = table[i].offset;
raw_width = table[i].rw;
raw_height = table[i].rh;
left_margin = table[i].lm;
top_margin = table[i].tm;
width = raw_width - left_margin - table[i].rm;
height = raw_height - top_margin - table[i].bm;
filters = 0x1010101 * table[i].cf;
colors = 4 - !((filters & filters >> 1) & 0x5555);
load_flags = table[i].lf;
switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) {
case 6:
load_raw = &CLASS minolta_rd175_load_raw; break;
case 8:
load_raw = &CLASS eight_bit_load_raw; break;
case 10: case 12:
load_flags |= 128;
load_raw = &CLASS packed_load_raw; break;
case 16:
order = 0x4949 | 0x404 * (load_flags & 1);
tiff_bps -= load_flags >> 4;
tiff_bps -= load_flags = load_flags >> 1 & 7;
load_raw = &CLASS unpacked_load_raw;
}
maximum = (1 << tiff_bps) - (1 << table[i].max);
}
if (zero_fsize) fsize = 0;
if (make[0] == 0) parse_smal (0, flen);
if (make[0] == 0) {
parse_jpeg(0);
fseek(ifp,0,SEEK_END);
int sz = ftell(ifp);
if (!strncmp(model,"ov",2) && sz>=6404096 && !fseek (ifp, -6404096, SEEK_END) &&
fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) {
strcpy (make, "OmniVision");
data_offset = ftell(ifp) + 0x8000-32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
} else is_raw = 0;
}
for (i=0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr (make, corp[i])) /* Simplify company names */
strcpy (make, corp[i]);
if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) &&
((cp = strcasestr(model," DIGITAL CAMERA")) ||
(cp = strstr(model,"FILE VERSION"))))
*cp = 0;
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ') *cp = 0;
cp = model + strlen(model);
while (*--cp == ' ') *cp = 0;
i = strlen(make); /* Remove make from model */
if (!strncasecmp (model, make, i) && model[i++] == ' ')
memmove (model, model+i, 64-i);
if (!strncmp (model,"FinePix ",8))
strcpy (model, model+8);
if (!strncmp (model,"Digital Camera ",15))
strcpy (model, model+15);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw) goto notraw;
if (!height) height = raw_height;
if (!width) width = raw_width;
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{ height = 2616; width = 3896; }
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{ height = 3124; width = 4688; filters = 0x16161616; }
if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x")))
{ width = 4309; filters = 0x16161616; }
if (width >= 4960 && !strncmp(model,"K-5",3))
{ left_margin = 10; width = 4950; filters = 0x16161616; }
if (width == 4736 && !strcmp(model,"K-7"))
{ height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; }
if (width == 7424 && !strcmp(model,"645D"))
{ height = 5502; width = 7328; filters = 0x61616161; top_margin = 29;
left_margin = 48; }
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
if (dng_version) {
if (filters == UINT_MAX) filters = 0;
if (filters) is_raw = tiff_samples;
else colors = tiff_samples;
switch (tiff_compress) {
case 0: /* Compression not set, assuming uncompressed */
case 1: load_raw = &CLASS packed_dng_load_raw; break;
case 7: load_raw = &CLASS lossless_dng_load_raw; break;
case 34892: load_raw = &CLASS lossy_dng_load_raw; break;
default: load_raw = 0;
}
goto dng_skip;
}
if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) {
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i=0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1]) {
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
}
if ((unique_id | 0x20000) == 0x2720000) {
left_margin = 8;
top_margin = 16;
}
}
if (!strcmp(make,"Canon") && unique_id)
{
for (i=0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
adobe_coeff ("Canon", unique[i].t_model);
strcpy(model,unique[i].t_model);
}
}
if (!strcasecmp(make,"Sony") && unique_id)
{
for (i=0; i < sizeof sony_unique / sizeof *sony_unique; i++)
if (unique_id == sony_unique[i].id)
{
adobe_coeff ("Sony", sony_unique[i].t_model);
strcpy(model,sony_unique[i].t_model);
}
}
if (!strcmp(make,"Nikon")) {
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model,"KAI-0340")
&& find_green (16, 16, 3840, 5120) < 25) {
height = 480;
top_margin = filters = 0;
strcpy (model,"C603");
}
if (is_foveon) {
if (height*2 < width) pixel_aspect = 0.5;
if (height > width) pixel_aspect = 2;
filters = 0;
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!imgdata.params.force_foveon_x3f)
simple_coeff(0);
#endif
} else if (!strcmp(make,"Canon") && tiff_bps == 15) {
switch (width) {
case 3344: width -= 66;
case 3872: width -= 6;
}
if (height > width) SWAP(height,width);
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
} else if (!strcmp(model,"PowerShot 600")) {
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
} else if (!strcmp(model,"PowerShot A5") ||
!strcmp(model,"PowerShot A5 Zoom")) {
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256/235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot A50")) {
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot Pro70")) {
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 40;
} else if (!strcmp(model,"PowerShot Pro90 IS") ||
!strcmp(model,"PowerShot G1")) {
colors = 4;
filters = 0xb4b4b4b4;
} else if (!strcmp(model,"PowerShot A610")) {
if (canon_s2is()) strcpy (model+10, "S2 IS");
} else if (!strcmp(model,"PowerShot SX220 HS")) {
mask[0][0] = top_margin = 16;
mask[0][2] = top_margin + height;
mask[0][3] = left_margin = 92;
} else if (!strcmp(model,"PowerShot S120")) {
raw_width = 4192;
raw_height = 3062;
width = 4022;
height = 3017;
mask[0][0] = top_margin = 30;
mask[0][2] = top_margin + height;
left_margin = 120;
mask[0][1] = 23;
mask[0][3] = 72;
} else if (!strcmp(model,"PowerShot G16")) {
mask[0][0] = 0;
mask[0][2] = 80;
mask[0][1] = 0;
mask[0][3] = 16;
top_margin = 28;
left_margin = 120;
width = raw_width-left_margin-48;
height = raw_height-top_margin-14;
} else if (!strcmp(model,"PowerShot SX50 HS")) {
mask[0][0] = top_margin = 17;
mask[0][2] = raw_height;
mask[0][3] = 80;
filters = 0x49494949;
} else if (!strcmp(model,"PowerShot G10")) {
filters = 0x49494949;
} else if (!strcmp(model,"EOS D2000C")) {
filters = 0x61616161;
black = curve[200];
} else if (!strcmp(model,"D1")) {
cam_mul[0] *= 256/527.0;
cam_mul[2] *= 256/317.0;
} else if (!strcmp(model,"D1X")) {
width -= 4;
pixel_aspect = 0.5;
} else if (!strcmp(model,"D40X") ||
!strcmp(model,"D60") ||
!strcmp(model,"D80") ||
!strcmp(model,"D3000")) {
height -= 3;
width -= 4;
} else if (!strcmp(model,"D3") ||
!strcmp(model,"D3S") ||
!strcmp(model,"D700")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"D3100")) {
width -= 28;
left_margin = 6;
} else if (!strcmp(model,"D5000") ||
!strcmp(model,"D90")) {
width -= 42;
} else if (!strcmp(model,"D5100") ||
!strcmp(model,"D7000") ||
!strcmp(model,"COOLPIX A")) {
width -= 44;
} else if (!strcmp(model,"D3200") ||
!strcmp(model,"D600") ||
!strcmp(model,"D610") ||
!strncmp(model,"D800",4)) {
width -= 46;
} else if (!strcmp(model,"D4")) {
width -= 52;
left_margin = 2;
} else if (!strncmp(model,"D40",3) ||
!strncmp(model,"D50",3) ||
!strncmp(model,"D70",3)) {
width--;
} else if (!strcmp(model,"D100")) {
if (load_flags)
raw_width = (width += 3) + 3;
} else if (!strcmp(model,"D200")) {
left_margin = 1;
width -= 4;
filters = 0x94949494;
} else if (!strncmp(model,"D2H",3)) {
left_margin = 6;
width -= 14;
} else if (!strncmp(model,"D2X",3)) {
if (width == 3264) width -= 32;
else width -= 8;
} else if (!strncmp(model,"D300",4)) {
width -= 32;
} else if (!strcmp(make,"Nikon") && !strcmp(model,"Df")) {
left_margin=4;
width-=64;
} else if (!strcmp(make,"Nikon") && raw_width == 4032) {
adobe_coeff ("Nikon","COOLPIX P7700");
} else if (!strncmp(model,"COOLPIX P",9)) {
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && iso_speed >= 400)
black = 255;
} else if (!strncmp(model,"1 ",2)) {
height -= 2;
} else if (fsize == 1581060) {
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
} else if (fsize == 3178560) {
cam_mul[0] *= 4;
cam_mul[2] *= 4;
} else if (fsize == 4771840) {
if (!timestamp && nikon_e995())
strcpy (model, "E995");
if (strcmp(model,"E995")) {
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
} else if (fsize == 2940928) {
if (!timestamp && !nikon_e2100())
strcpy (model,"E2500");
if (!strcmp(model,"E2500")) {
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
} else if (fsize == 4775936) {
if (!timestamp) nikon_3700();
if (model[0] == 'E' && atoi(model+1) < 3700)
filters = 0x49494949;
if (!strcmp(model,"Optio 33WR")) {
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O') {
i = find_green (12, 32, 1188864, 3576832);
c = find_green (12, 32, 2383920, 2387016);
if (abs(i) < abs(c)) {
SWAP(i,c);
load_flags = 24;
}
if (i < 0) filters = 0x61616161;
}
} else if (fsize == 5869568) {
if (!timestamp && minolta_z2()) {
strcpy (make, "Minolta");
strcpy (model,"DiMAGE Z2");
}
load_flags = 6 + 24*(make[0] == 'M');
} else if (fsize == 6291456) {
fseek (ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d) {
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy (make, "ISG");
model[0] = 0;
}
} else if (!strcmp(make,"Fujifilm")) {
if (!strcmp(model+7,"S2Pro")) {
strcpy (model,"S2Pro");
height = 2144;
width = 2880;
flip = 6;
} else if (load_raw != &CLASS packed_load_raw)
maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00;
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width ) >> 2 << 1;
if (width == 2848 || width == 3664) filters = 0x16161616;
if (width == 4032 || width == 4952) left_margin = 0;
if (width == 3328 && (width -= 66)) left_margin = 34;
if (width == 4936) left_margin = 4;
if (!strcmp(model,"HS50EXR")) {
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if (fuji_layout) raw_width *= is_raw;
} else if (!strcmp(model,"KD-400Z")) {
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
} else if (!strcmp(model,"KD-510Z")) {
goto konica_510z;
} else if (!strcasecmp(make,"Minolta")) {
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model,"DiMAGE A",8)) {
if (!strcmp(model,"DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"ALPHA",5) ||
!strncmp(model,"DYNAX",5) ||
!strncmp(model,"MAXXUM",6)) {
sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M'));
adobe_coeff (make, model+20);
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"DiMAGE G",8)) {
if (model[8] == '4') {
height = 1716;
width = 2304;
} else if (model[8] == '5') {
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
} else if (model[8] == '6') {
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
} else if (!strcmp(model,"*ist D")) {
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
} else if (!strcmp(model,"*ist DS")) {
height -= 2;
} else if (!strcmp(make,"Samsung") && raw_width == 4704) {
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 32;
} else if (!strcmp(make,"Samsung") && raw_height == 3714) {
height -= 18;
width = 5536;
filters = 0x49494949;
} else if (!strcmp(make,"Samsung") && raw_width == 5632) {
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12) load_flags = 80;
} else if (!strcmp(model,"EX1")) {
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682) {
height -= 10;
width -= 46;
top_margin = 8;
}
} else if (!strcmp(model,"WB2000")) {
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718) {
height -= 28;
width -= 56;
top_margin = 8;
}
} else if (strstr(model,"WB550")) {
strcpy (model, "WB550");
} else if (!strcmp(model,"EX2F")) {
height = 3045;
width = 4070;
top_margin = 3;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
} else if (!strcmp(model,"STV680 VGA")) {
black = 16;
} else if (!strcmp(model,"N95")) {
height = raw_height - (top_margin = 2);
} else if (!strcmp(model,"640x480")) {
gamma_curve (0.45, 4.5, 1, 255);
} else if (!strcmp(make,"Hasselblad")) {
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262) {
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
} else if (raw_width == 7410) {
height = 5502;
width = 7328;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
} else if (raw_width == 9044) {
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
} else if (raw_width == 4090) {
strcpy (model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
}
} else if (!strcmp(make,"Sinar")) {
if (!load_raw) load_raw = &CLASS unpacked_load_raw;
maximum = 0x3fff;
} else if (!strcmp(make,"Leaf")) {
maximum = 0x3fff;
fseek (ifp, data_offset, SEEK_SET);
if (ljpeg_start (&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1) filters = 0;
if (tiff_samples > 1 || tile_length < raw_height) {
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048) {
if (tiff_samples == 1) {
filters = 1;
strcpy (cdesc, "RBTG");
strcpy (model, "CatchLight");
top_margin = 8; left_margin = 18; height = 2032; width = 2016;
} else {
strcpy (model, "DCB2");
top_margin = 10; left_margin = 16; height = 2028; width = 2022;
}
} else if (width+height == 3144+2060) {
if (!model[0]) strcpy (model, "Cantare");
if (width > height) {
top_margin = 6; left_margin = 32; height = 2048; width = 3072;
filters = 0x61616161;
} else {
left_margin = 6; top_margin = 32; width = 2048; height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V') filters = 0;
else is_raw = tiff_samples;
} else if (width == 2116) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
} else if (width == 3171) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
} else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) {
if ((flen - data_offset) / (raw_width*8/7) == raw_height)
load_raw = &CLASS panasonic_load_raw;
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
}
zero_is_bad = 1;
if ((height += 12) > raw_height) height = raw_height;
for (i=0; i < sizeof pana / sizeof *pana; i++)
if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
left_margin = pana[i][2];
top_margin = pana[i][3];
width += pana[i][4];
height += pana[i][5];
}
filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
} else if (!strcmp(model,"C770UZ")) {
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
} else if (!strcmp(make,"Olympus")) {
height += height & 1;
if (exif_cfa) filters = exif_cfa;
if (width == 4100) width -= 4;
if (width == 4080) width -= 24;
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model,"E-300") ||
!strcmp(model,"E-500")) {
width -= 20;
if (load_raw == &CLASS unpacked_load_raw) {
maximum = 0xfc3;
memset (cblack, 0, sizeof cblack);
}
} else if (!strcmp(model,"STYLUS1")) {
width -= 14;
maximum = 0xfff;
} else if (!strcmp(model,"E-330")) {
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
} else if (!strcmp(model,"SP550UZ")) {
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
}
} else if (!strcmp(model,"N Digital")) {
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
} else if (!strcmp(model,"DSC-F828")) {
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy (cdesc, "RGBE");
} else if (!strcmp(model,"DSC-V3")) {
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
} else if (!strcmp(make,"Sony") && raw_width == 3984) {
adobe_coeff ("Sony","DSC-R1");
width = 3925;
order = 0x4d4d;
} else if (!strcmp(make,"Sony") && !strcmp(model,"ILCE-3000")) {
width -= 32;
} else if (!strcmp(make,"Sony") && raw_width == 5504) {
width -= 8;
} else if (!strcmp(make,"Sony") && raw_width == 6048) {
width -= 24;
} else if (!strcmp(make,"Sony") && raw_width == 7392) {
width -= 24; // 21 pix really
} else if (!strcmp(model,"DSLR-A100")) {
if (width == 3880) {
height--;
width = ++raw_width;
} else {
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
} else if (!strcmp(model,"DSLR-A350")) {
height -= 4;
} else if (!strcmp(model,"PIXL")) {
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve (0, 7, 1, 255);
} else if (!strcmp(model,"C603") || !strcmp(model,"C330")) {
order = 0x4949;
if (filters && data_offset) {
fseek (ifp, 168, SEEK_SET);
read_shorts (curve, 256);
} else gamma_curve (0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw
: &CLASS kodak_yrgb_load_raw;
} else if (!strncasecmp(model,"EasyShare",9)) {
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
} else if (!strcasecmp(make,"Kodak")) {
if (filters == UINT_MAX) filters = 0x61616161;
if (!strncmp(model,"NC2000",6)) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"EOSDCS3B")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"EOSDCS1")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"DCS420")) {
width -= 4;
left_margin = 2;
} else if (!strncmp(model,"DCS460 ",7)) {
model[6] = 0;
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"DCS460A")) {
width -= 4;
left_margin = 2;
colors = 1;
filters = 0;
} else if (!strcmp(model,"DCS660M")) {
black = 214;
colors = 1;
filters = 0;
} else if (!strcmp(model,"DCS760M")) {
colors = 1;
filters = 0;
}
if (!strcmp(model+4,"20X"))
strcpy (cdesc, "MYCY");
if (strstr(model,"DC25")) {
strcpy (model, "DC25");
data_offset = 15424;
}
if (!strncmp(model,"DC2",3)) {
raw_height = 2 + (height = 242);
if (flen < 100000) {
raw_width = 256; width = 249;
pixel_aspect = (4.0*height) / (3.0*width);
} else {
raw_width = 512; width = 501;
pixel_aspect = (493.0*height) / (373.0*width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
} else if (!strcmp(model,"40")) {
strcpy (model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC50")) {
strcpy (model, "DC50");
height = 512;
width = 768;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC120")) {
strcpy (model, "DC120");
height = 976;
width = 848;
pixel_aspect = height/0.75/width;
load_raw = tiff_compress == 7 ?
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
} else if (!strcmp(model,"DCS200")) {
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
write_thumb = &CLASS layer_thumb;
black = 17;
}
} else if (!strcmp(model,"Fotoman Pixtura")) {
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
} else if (!strncmp(model,"QuickTake",9)) {
if (head[5]) strcpy (model+10, "200");
fseek (ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(),get2()) == 30 ? 738:736;
if (height > width) {
SWAP(height,width);
fseek (ifp, data_offset-6, SEEK_SET);
flip = ~get2() & 3 ? 5:6;
}
filters = 0x61616161;
} else if (!strcmp(make,"Rollei") && !load_raw) {
switch (raw_width) {
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
else if (!strcmp(model,"GRAS-50S5C")) {
height = 2048;
width = 2440;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x49494949;
order = 0x4949;
maximum = 0xfffC;
} else if (!strcmp(model,"BB-500CL")) {
height = 2058;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"BB-500GE")) {
height = 2058;
width = 2456;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"SVS625CL")) {
height = 2050;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x0fff;
}
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 4 || colors > 4 || colors < 1)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
if (!model[0])
sprintf (model, "%dx%d", width, height);
if (filters == UINT_MAX) filters = 0x94949494;
if (raw_color) adobe_coeff (make, model);
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color) adobe_coeff ("Apple","Quicktake");
if (thumb_offset && !thumb_height) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
dng_skip:
if (fuji_width) {
fuji_width = width >> !fuji_layout;
if (~fuji_width & 1) filters = 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
} else {
if (raw_height < height) raw_height = height;
if (raw_width < width ) raw_width = width;
}
if (!tiff_bps) tiff_bps = 12;
if (!maximum) maximum = (1 << tiff_bps) - 1;
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 4 || colors > 4)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjasper");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
#endif
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw ||
load_raw == &CLASS lossy_dng_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjpeg");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
#endif
}
#endif
if (!cdesc[0])
strcpy (cdesc, colors == 3 ? "RGBG":"GMCY");
if (!raw_height) raw_height = height;
if (!raw_width ) raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX) flip = tiff_flip;
if (flip == UINT_MAX) flip = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
}
void CLASS convert_to_rgb()
{
#ifndef LIBRAW_LIBRARY_BUILD
int row, col, c;
#endif
int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
ushort *img;
float out[3];
#endif
float out_cam[3][4];
double num, inverse[3][3];
static const double xyzd50_srgb[3][3] =
{ { 0.436083, 0.385083, 0.143055 },
{ 0.222507, 0.716888, 0.060608 },
{ 0.013930, 0.097097, 0.714022 } };
static const double rgb_rgb[3][3] =
{ { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
static const double adobe_rgb[3][3] =
{ { 0.715146, 0.284856, 0.000000 },
{ 0.000000, 1.000000, 0.000000 },
{ 0.000000, 0.041166, 0.958839 } };
static const double wide_rgb[3][3] =
{ { 0.593087, 0.404710, 0.002206 },
{ 0.095413, 0.843149, 0.061439 },
{ 0.011621, 0.069091, 0.919288 } };
static const double prophoto_rgb[3][3] =
{ { 0.529317, 0.330092, 0.140588 },
{ 0.098368, 0.873465, 0.028169 },
{ 0.016879, 0.117663, 0.865457 } };
static const double (*out_rgb[])[3] =
{ rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb };
static const char *name[] =
{ "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" };
static const unsigned phead[] =
{ 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
unsigned pbody[] =
{ 10, 0x63707274, 0, 36, /* cprt */
0x64657363, 0, 40, /* desc */
0x77747074, 0, 20, /* wtpt */
0x626b7074, 0, 20, /* bkpt */
0x72545243, 0, 14, /* rTRC */
0x67545243, 0, 14, /* gTRC */
0x62545243, 0, 14, /* bTRC */
0x7258595a, 0, 20, /* rXYZ */
0x6758595a, 0, 20, /* gXYZ */
0x6258595a, 0, 20 }; /* bXYZ */
static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2);
#endif
gamma_curve (gamm[0], gamm[1], 0, 0);
memcpy (out_cam, rgb_cam, sizeof out_cam);
#ifndef LIBRAW_LIBRARY_BUILD
raw_color |= colors == 1 || document_mode ||
output_color < 1 || output_color > 5;
#else
raw_color |= colors == 1 ||
output_color < 1 || output_color > 5;
#endif
if (!raw_color) {
oprof = (unsigned *) calloc (phead[0], 1);
merror (oprof, "convert_to_rgb()");
memcpy (oprof, phead, sizeof phead);
if (output_color == 5) oprof[4] = oprof[5];
oprof[0] = 132 + 12*pbody[0];
for (i=0; i < pbody[0]; i++) {
oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
pbody[i*3+2] = oprof[0];
oprof[0] += (pbody[i*3+3] + 3) & -4;
}
memcpy (oprof+32, pbody, sizeof pbody);
oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1;
memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite);
pcurve[3] = (short)(256/gamm[5]+0.5) << 16;
for (i=4; i < 7; i++)
memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve);
pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++) {
for (num = k=0; k < 3; k++)
num += xyzd50_srgb[i][k] * inverse[j][k];
oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
}
for (i=0; i < phead[0]/4; i++)
oprof[i] = htonl(oprof[i]);
strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (out_cam[i][j] = k=0; k < 3; k++)
out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr, raw_color ? _("Building histograms...\n") :
_("Converting to %s colorspace...\n"), name[output_color-1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
convert_to_rgb_loop(out_cam);
#else
memset (histogram, 0, sizeof histogram);
for (img=image[0], row=0; row < height; row++)
for (col=0; col < width; col++, img+=4) {
if (!raw_color) {
out[0] = out[1] = out[2] = 0;
FORCC {
out[0] += out_cam[0][c] * img[c];
out[1] += out_cam[1][c] * img[c];
out[2] += out_cam[2][c] * img[c];
}
FORC3 img[c] = CLIP((int) out[c]);
}
else if (document_mode)
img[0] = img[fcol(row,col)];
FORCC histogram[c][img[c] >> 3]++;
}
#endif
if (colors == 4 && output_color) colors = 3;
#ifndef LIBRAW_LIBRARY_BUILD
if (document_mode && filters) colors = 1;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2);
#endif
}
void CLASS fuji_rotate()
{
int i, row, col;
double step;
float r, c, fr, fc;
unsigned ur, uc;
ushort wide, high, (*img)[4], (*pix)[4];
if (!fuji_width) return;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Rotating image 45 degrees...\n"));
#endif
fuji_width = (fuji_width - 1 + shrink) >> shrink;
step = sqrt(0.5);
wide = fuji_width / step;
high = (height - fuji_width) / step;
img = (ushort (*)[4]) calloc (high, wide*sizeof *img);
merror (img, "fuji_rotate()");
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2);
#endif
for (row=0; row < high; row++)
for (col=0; col < wide; col++) {
ur = r = fuji_width + (row-col)*step;
uc = c = (row+col)*step;
if (ur > height-2 || uc > width-2) continue;
fr = r - ur;
fc = c - uc;
pix = image + ur*width + uc;
for (i=0; i < colors; i++)
img[row*wide+col][i] =
(pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) +
(pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr;
}
free (image);
width = wide;
height = high;
image = img;
fuji_width = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2);
#endif
}
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
int CLASS flip_index (int row, int col)
{
if (flip & 4) SWAP(row,col);
if (flip & 2) row = iheight - 1 - row;
if (flip & 1) col = iwidth - 1 - col;
return row * iwidth + col;
}
void CLASS tiff_set (ushort *ntag,
ushort tag, ushort type, int count, int val)
{
struct tiff_tag *tt;
int c;
tt = (struct tiff_tag *)(ntag+1) + (*ntag)++;
tt->tag = tag;
tt->type = type;
tt->count = count;
if (type < 3 && count <= 4)
FORC(4) tt->val.c[c] = val >> (c << 3);
else if (type == 3 && count <= 2)
FORC(2) tt->val.s[c] = val >> (c << 4);
else tt->val.i = val;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
void CLASS tiff_head (struct tiff_hdr *th, int full)
{
int c, psize=0;
struct tm *t;
memset (th, 0, sizeof *th);
th->t_order = htonl(0x4d4d4949) >> 16;
th->magic = 42;
th->ifd = 10;
if (full) {
tiff_set (&th->ntag, 254, 4, 1, 0);
tiff_set (&th->ntag, 256, 4, 1, width);
tiff_set (&th->ntag, 257, 4, 1, height);
tiff_set (&th->ntag, 258, 3, colors, output_bps);
if (colors > 2)
th->tag[th->ntag-1].val.i = TOFF(th->bps);
FORC4 th->bps[c] = output_bps;
tiff_set (&th->ntag, 259, 3, 1, 1);
tiff_set (&th->ntag, 262, 3, 1, 1 + (colors > 1));
}
tiff_set (&th->ntag, 270, 2, 512, TOFF(th->t_desc));
tiff_set (&th->ntag, 271, 2, 64, TOFF(th->t_make));
tiff_set (&th->ntag, 272, 2, 64, TOFF(th->t_model));
if (full) {
if (oprof) psize = ntohl(oprof[0]);
tiff_set (&th->ntag, 273, 4, 1, sizeof *th + psize);
tiff_set (&th->ntag, 277, 3, 1, colors);
tiff_set (&th->ntag, 278, 4, 1, height);
tiff_set (&th->ntag, 279, 4, 1, height*width*colors*output_bps/8);
} else
tiff_set (&th->ntag, 274, 3, 1, "12435867"[flip]-'0');
tiff_set (&th->ntag, 282, 5, 1, TOFF(th->rat[0]));
tiff_set (&th->ntag, 283, 5, 1, TOFF(th->rat[2]));
tiff_set (&th->ntag, 284, 3, 1, 1);
tiff_set (&th->ntag, 296, 3, 1, 2);
tiff_set (&th->ntag, 305, 2, 32, TOFF(th->soft));
tiff_set (&th->ntag, 306, 2, 20, TOFF(th->date));
tiff_set (&th->ntag, 315, 2, 64, TOFF(th->t_artist));
tiff_set (&th->ntag, 34665, 4, 1, TOFF(th->nexif));
if (psize) tiff_set (&th->ntag, 34675, 7, psize, sizeof *th);
tiff_set (&th->nexif, 33434, 5, 1, TOFF(th->rat[4]));
tiff_set (&th->nexif, 33437, 5, 1, TOFF(th->rat[6]));
tiff_set (&th->nexif, 34855, 3, 1, iso_speed);
tiff_set (&th->nexif, 37386, 5, 1, TOFF(th->rat[8]));
if (gpsdata[1]) {
tiff_set (&th->ntag, 34853, 4, 1, TOFF(th->ngps));
tiff_set (&th->ngps, 0, 1, 4, 0x202);
tiff_set (&th->ngps, 1, 2, 2, gpsdata[29]);
tiff_set (&th->ngps, 2, 5, 3, TOFF(th->gps[0]));
tiff_set (&th->ngps, 3, 2, 2, gpsdata[30]);
tiff_set (&th->ngps, 4, 5, 3, TOFF(th->gps[6]));
tiff_set (&th->ngps, 5, 1, 1, gpsdata[31]);
tiff_set (&th->ngps, 6, 5, 1, TOFF(th->gps[18]));
tiff_set (&th->ngps, 7, 5, 3, TOFF(th->gps[12]));
tiff_set (&th->ngps, 18, 2, 12, TOFF(th->gps[20]));
tiff_set (&th->ngps, 29, 2, 12, TOFF(th->gps[23]));
memcpy (th->gps, gpsdata, sizeof th->gps);
}
th->rat[0] = th->rat[2] = 300;
th->rat[1] = th->rat[3] = 1;
FORC(6) th->rat[4+c] = 1000000;
th->rat[4] *= shutter;
th->rat[6] *= aperture;
th->rat[8] *= focal_len;
strncpy (th->t_desc, desc, 512);
strncpy (th->t_make, make, 64);
strncpy (th->t_model, model, 64);
strcpy (th->soft, "dcraw v"DCRAW_VERSION);
t = localtime (×tamp);
sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d",
t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec);
strncpy (th->t_artist, artist, 64);
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length)
{
ushort exif[5];
struct tiff_hdr th;
fputc (0xff, tfp);
fputc (0xd8, tfp);
if (strcmp (t_humb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, tfp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, tfp);
}
fwrite (t_humb+2, 1, t_humb_length-2, tfp);
}
void CLASS jpeg_thumb()
{
char *thumb;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
jpeg_thumb_writer(ofp,thumb,thumb_length);
free (thumb);
}
#else
void CLASS jpeg_thumb()
{
char *thumb;
ushort exif[5];
struct tiff_hdr th;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
fputc (0xff, ofp);
fputc (0xd8, ofp);
if (strcmp (thumb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, ofp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, ofp);
}
fwrite (thumb+2, 1, thumb_length-2, ofp);
free (thumb);
}
#endif
void CLASS write_ppm_tiff()
{
struct tiff_hdr th;
uchar *ppm;
ushort *ppm2;
int c, row, col, soff, rstep, cstep;
int perc, val, total, t_white=0x2000;
#ifdef LIBRAW_LIBRARY_BUILD
perc = width * height * auto_bright_thr; /* 99th percentile white level */
#else
perc = width * height * 0.01; /* 99th percentile white level */
#endif
if (fuji_width) perc /= 2;
if (!((highlight & ~2) || no_auto_bright))
for (t_white=c=0; c < colors; c++) {
for (val=0x2000, total=0; --val > 32; )
if ((total += histogram[c][val]) > perc) break;
if (t_white < val) t_white = val;
}
gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright);
iheight = height;
iwidth = width;
if (flip & 4) SWAP(height,width);
ppm = (uchar *) calloc (width, colors*output_bps/8);
ppm2 = (ushort *) ppm;
merror (ppm, "write_ppm_tiff()");
if (output_tiff) {
tiff_head (&th, 1);
fwrite (&th, sizeof th, 1, ofp);
if (oprof)
fwrite (oprof, ntohl(oprof[0]), 1, ofp);
} else if (colors > 3)
fprintf (ofp,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
width, height, colors, (1 << output_bps)-1, cdesc);
else
fprintf (ofp, "P%d\n%d %d\n%d\n",
colors/2+5, width, height, (1 << output_bps)-1);
soff = flip_index (0, 0);
cstep = flip_index (0, 1) - soff;
rstep = flip_index (1, 0) - flip_index (0, width);
for (row=0; row < height; row++, soff += rstep) {
for (col=0; col < width; col++, soff += cstep)
if (output_bps == 8)
FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8;
else FORCC ppm2[col*colors+c] = curve[image[soff][c]];
if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
swab ((char*)ppm2, (char*)ppm2, width*colors*2);
fwrite (ppm, colors*output_bps/8, width, ofp);
}
free (ppm);
}
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/bad_1605_3 |
crossvul-cpp_data_good_1605_3 | /*
Copyright 2008-2013 LibRaw LLC (info@libraw.org)
LibRaw is free software; you can redistribute it and/or modify
it under the terms of the one of three licenses as you choose:
1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1
(See file LICENSE.LGPL provided in LibRaw distribution archive for details).
2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
(See file LICENSE.CDDL provided in LibRaw distribution archive for details).
3. LibRaw Software License 27032010
(See file LICENSE.LibRaw.pdf provided in LibRaw distribution archive for details).
This file is generated from Dave Coffin's dcraw.c
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2010 by Dave Coffin, dcoffin a cybercom o net
Look into dcraw homepage (probably http://cybercom.net/~dcoffin/dcraw/)
for more information
*/
#line 261 "dcraw/dcraw.c"
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#define LIBRAW_IO_REDEFINED
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
#line 272 "dcraw/dcraw.c"
int CLASS fcol (int row, int col)
{
static const char filter[16][16] =
{ { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 },
{ 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 },
{ 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 },
{ 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 },
{ 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 },
{ 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 },
{ 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 },
{ 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 },
{ 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 },
{ 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 },
{ 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 },
{ 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 },
{ 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 },
{ 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 },
{ 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 },
{ 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } };
if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15];
if (filters == 9) return xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6];
return FC(row,col);
}
#ifndef __GLIBC__
char *my_memmem (char *haystack, size_t haystacklen,
char *needle, size_t needlelen)
{
char *c;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp (c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
char *my_strcasestr (char *haystack, const char *needle)
{
char *c;
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, strlen(needle)))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
#line 340 "dcraw/dcraw.c"
ushort CLASS sget2 (uchar *s)
{
if (order == 0x4949) /* "II" means little-endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian */
return s[0] << 8 | s[1];
}
ushort CLASS get2()
{
uchar str[2] = { 0xff,0xff };
fread (str, 1, 2, ifp);
return sget2(str);
}
unsigned CLASS sget4 (uchar *s)
{
if (order == 0x4949)
return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
else
return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
#define sget4(s) sget4((uchar *)s)
unsigned CLASS get4()
{
uchar str[4] = { 0xff,0xff,0xff,0xff };
fread (str, 1, 4, ifp);
return sget4(str);
}
unsigned CLASS getint (int type)
{
return type == 3 ? get2() : get4();
}
float CLASS int_to_float (int i)
{
union { int i; float f; } u;
u.i = i;
return u.f;
}
double CLASS getreal (int type)
{
union { char c[8]; double d; } u;
int i, rev;
switch (type) {
case 3: return (unsigned short) get2();
case 4: return (unsigned int) get4();
case 5: u.d = (unsigned int) get4();
return u.d / (unsigned int) get4();
case 8: return (signed short) get2();
case 9: return (signed int) get4();
case 10: u.d = (signed int) get4();
return u.d / (signed int) get4();
case 11: return int_to_float (get4());
case 12:
rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
for (i=0; i < 8; i++)
u.c[i ^ rev] = fgetc(ifp);
return u.d;
default: return fgetc(ifp);
}
}
void CLASS read_shorts (ushort *pixel, int count)
{
if (fread (pixel, 2, count, ifp) < count) derror();
if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
swab ((char*)pixel, (char*)pixel, count*2);
}
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white */
int CLASS canon_600_color (int ratio[2], int mar)
{
int clipped=0, target, miss;
if (flash_used) {
if (ratio[1] < -104)
{ ratio[1] = -104; clipped = 1; }
if (ratio[1] > 12)
{ ratio[1] = 12; clipped = 1; }
} else {
if (ratio[1] < -264 || ratio[1] > 461) return 2;
if (ratio[1] < -50)
{ ratio[1] = -50; clipped = 1; }
if (ratio[1] > 307)
{ ratio[1] = 307; clipped = 1; }
}
target = flash_used || ratio[1] < 197
? -38 - (398 * ratio[1] >> 10)
: -123 + (48 * ratio[1] >> 10);
if (target - mar <= ratio[0] &&
target + 20 >= ratio[0] && !clipped) return 0;
miss = target - ratio[0];
if (abs(miss) >= mar*4) return 2;
if (miss < -20) miss = -20;
if (miss > mar) miss = mar;
ratio[0] = target - miss;
return 1;
}
void CLASS canon_600_auto_wb()
{
int mar, row, col, i, j, st, count[] = { 0,0 };
int test[8], total[2][8], ratio[2][2], stat[2];
memset (&total, 0, sizeof total);
i = canon_ev + 0.5;
if (i < 10) mar = 150;
else if (i > 12) mar = 20;
else mar = 280 - 20 * i;
if (flash_used) mar = 80;
for (row=14; row < height-14; row+=4)
for (col=10; col < width; col+=2) {
for (i=0; i < 8; i++)
test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] =
BAYER(row+(i >> 1),col+(i & 1));
for (i=0; i < 8; i++)
if (test[i] < 150 || test[i] > 1500) goto next;
for (i=0; i < 4; i++)
if (abs(test[i] - test[i+4]) > 50) goto next;
for (i=0; i < 2; i++) {
for (j=0; j < 4; j+=2)
ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j];
stat[i] = canon_600_color (ratio[i], mar);
}
if ((st = stat[0] | stat[1]) > 1) goto next;
for (i=0; i < 2; i++)
if (stat[i])
for (j=0; j < 2; j++)
test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10;
for (i=0; i < 8; i++)
total[st][i] += test[i];
count[st]++;
next: ;
}
if (count[0] | count[1]) {
st = count[0]*200 < count[1];
for (i=0; i < 4; i++)
pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]);
}
}
void CLASS canon_600_coeff()
{
static const short table[6][12] = {
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 },
{ -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 },
{ -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 },
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } };
int t=0, i, c;
float mc, yc;
mc = pre_mul[1] / pre_mul[2];
yc = pre_mul[3] / pre_mul[2];
if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1;
if (mc > 1.28 && mc <= 2) {
if (yc < 0.8789) t=3;
else if (yc <= 2) t=4;
}
if (flash_used) t=5;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0;
}
void CLASS canon_600_load_raw()
{
uchar data[1120], *dp;
ushort *pix;
int irow, row;
for (irow=row=0; irow < height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data, 1, 1120, ifp) < 1120) derror();
pix = raw_image + row*raw_width;
for (dp=data; dp < data+1120; dp+=10, pix+=8) {
pix[0] = (dp[0] << 2) + (dp[1] >> 6 );
pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
pix[3] = (dp[4] << 2) + (dp[1] & 3);
pix[4] = (dp[5] << 2) + (dp[9] & 3);
pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
pix[7] = (dp[8] << 2) + (dp[9] >> 6 );
}
if ((row+=2) > height) row = 1;
}
}
void CLASS canon_600_correct()
{
int row, col, val;
static const short mul[4][2] =
{ { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } };
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
if ((val = BAYER(row,col) - black) < 0) val = 0;
val = val * mul[row & 3][col & 1] >> 9;
BAYER(row,col) = val;
}
}
canon_600_fixed_wb(1311);
canon_600_auto_wb();
canon_600_coeff();
maximum = (0x3ff - black) * 1109 >> 9;
black = 0;
}
int CLASS canon_s2is()
{
unsigned row;
for (row=0; row < 100; row++) {
fseek (ifp, row*3340 + 3284, SEEK_SET);
if (getc(ifp) > 15) return 1;
}
return 0;
}
unsigned CLASS getbithuff (int nbits, ushort *huff)
{
#ifdef LIBRAW_NOTHREADS
static unsigned bitbuf=0;
static int vbits=0, reset=0;
#else
#define bitbuf tls->getbits.bitbuf
#define vbits tls->getbits.vbits
#define reset tls->getbits.reset
#endif
unsigned c;
if (nbits > 25) return 0;
if (nbits < 0)
return bitbuf = vbits = reset = 0;
if (nbits == 0 || vbits < 0) return 0;
while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF &&
!(reset = zero_after_ff && c == 0xff && fgetc(ifp))) {
bitbuf = (bitbuf << 8) + (uchar) c;
vbits += 8;
}
c = bitbuf << (32-vbits) >> (32-nbits);
if (huff) {
vbits -= huff[c] >> 8;
c = (uchar) huff[c];
} else
vbits -= nbits;
if (vbits < 0) derror();
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#undef reset
#endif
}
#define getbits(n) getbithuff(n,0)
#define gethuff(h) getbithuff(*h,h+1)
/*
Construct a decode tree according the specification in *source.
The first 16 bytes specify how many codes should be 1-bit, 2-bit
3-bit, etc. Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
ushort * CLASS make_decoder_ref (const uchar **source)
{
int max, len, h, i, j;
const uchar *count;
ushort *huff;
count = (*source += 16) - 17;
for (max=16; max && !count[max]; max--);
huff = (ushort *) calloc (1 + (1 << max), sizeof *huff);
merror (huff, "make_decoder()");
huff[0] = max;
for (h=len=1; len <= max; len++)
for (i=0; i < count[len]; i++, ++*source)
for (j=0; j < 1 << (max-len); j++)
if (h <= 1 << max)
huff[h++] = len << 8 | **source;
return huff;
}
ushort * CLASS make_decoder (const uchar *source)
{
return make_decoder_ref (&source);
}
void CLASS crw_init_tables (unsigned table, ushort *huff[2])
{
static const uchar first_tree[3][29] = {
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
{ 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0,
0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff },
{ 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0,
0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff },
};
static const uchar second_tree[3][180] = {
{ 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139,
0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08,
0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0,
0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42,
0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57,
0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9,
0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98,
0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6,
0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4,
0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7,
0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1,
0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64,
0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba,
0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4,
0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff },
{ 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140,
0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06,
0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32,
0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51,
0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26,
0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59,
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9,
0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99,
0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85,
0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8,
0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a,
0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9,
0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8,
0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8,
0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff },
{ 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117,
0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08,
0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22,
0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34,
0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41,
0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48,
0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69,
0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8,
0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94,
0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a,
0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6,
0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62,
0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5,
0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3,
0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff }
};
if (table > 2) table = 2;
huff[0] = make_decoder ( first_tree[table]);
huff[1] = make_decoder (second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
int CLASS canon_has_lowbits()
{
uchar test[0x4000];
int ret=1, i;
fseek (ifp, 0, SEEK_SET);
fread (test, 1, sizeof test, ifp);
for (i=540; i < sizeof test - 1; i++)
if (test[i] == 0xff) {
if (test[i+1]) return 1;
ret=0;
}
return ret;
}
void CLASS canon_load_raw()
{
ushort *pixel, *prow, *huff[2];
int nblocks, lowbits, i, c, row, r, save, val;
int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2];
crw_init_tables (tiff_compress, huff);
lowbits = canon_has_lowbits();
if (!lowbits) maximum = 0x3ff;
fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET);
zero_after_ff = 1;
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row+=8) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
nblocks = MIN (8, raw_height-row) * raw_width >> 6;
for (block=0; block < nblocks; block++) {
memset (diffbuf, 0, sizeof diffbuf);
for (i=0; i < 64; i++ ) {
leaf = gethuff(huff[i > 0]);
if (leaf == 0 && i) break;
if (leaf == 0xff) continue;
i += leaf >> 4;
len = leaf & 15;
if (len == 0) continue;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if (i < 64) diffbuf[i] = diff;
}
diffbuf[0] += carry;
carry = diffbuf[0];
for (i=0; i < 64; i++ ) {
if (pnum++ % raw_width == 0)
base[0] = base[1] = 512;
if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
derror();
}
}
if (lowbits) {
save = ftell(ifp);
fseek (ifp, 26 + row*raw_width/4, SEEK_SET);
for (prow=pixel, i=0; i < raw_width*2; i++) {
c = fgetc(ifp);
for (r=0; r < 8; r+=2, prow++) {
val = (*prow << 2) + ((c >> r) & 3);
if (raw_width == 2672 && val < 512) val += 2;
*prow = val;
}
}
fseek (ifp, save, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
FORC(2) free (huff[c]);
throw;
}
#endif
FORC(2) free (huff[c]);
}
#line 841 "dcraw/dcraw.c"
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
int c, tag;
ushort len;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
fread (data, 2, 1, ifp);
if (data[1] != 0xd8) return 0;
do {
fread (data, 2, 2, ifp);
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3:
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc0:
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4:
if (info_only) break;
for (dp = data; dp < data+len && (c = *dp++) < 4; )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda:
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (info_only) return 1;
if (jh->clrs > 6 || !jh->huff[0]) return 0;
FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
void CLASS ljpeg_end (struct jhead *jh)
{
int c;
FORC4 if (jh->free[c]) free (jh->free[c]);
free (jh->row);
}
int CLASS ljpeg_diff (ushort *huff)
{
int len, diff;
if(!huff)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
len = gethuff(huff);
if (len == 16 && (!dng_version || dng_version >= 0x1010000))
return -32768;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
return diff;
}
ushort * CLASS ljpeg_row (int jrow, struct jhead *jh)
{
int col, c, diff, pred, spred=0;
ushort mark=0, *row[3];
if (jrow * jh->wide % jh->restart == 0) {
FORC(6) jh->vpred[c] = 1 << (jh->bits-1);
if (jrow) {
fseek (ifp, -2, SEEK_CUR);
do mark = (mark << 8) + (c = fgetc(ifp));
while (c != EOF && mark >> 4 != 0xffd);
}
getbits(-1);
}
FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1);
for (col=0; col < jh->wide; col++)
FORC(jh->clrs) {
diff = ljpeg_diff (jh->huff[c]);
if (jh->sraw && c <= jh->sraw && (col | c))
pred = spred;
else if (col) pred = row[0][-jh->clrs];
else pred = (jh->vpred[c] += diff) - diff;
if (jrow && col) switch (jh->psv) {
case 1: break;
case 2: pred = row[1][0]; break;
case 3: pred = row[1][-jh->clrs]; break;
case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break;
case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break;
case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break;
case 7: pred = (pred + row[1][0]) >> 1; break;
default: pred = 0;
}
if ((**row = pred + diff) >> jh->bits) derror();
if (c <= jh->sraw) spred = **row;
row[0]++; row[1]++;
}
return row[2];
}
void CLASS lossless_jpeg_load_raw()
{
int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0;
struct jhead jh;
ushort *rp;
if (!ljpeg_start (&jh, 0)) return;
if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
jwide = jh.wide * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
if (load_flags & 1)
row = jrow & 1 ? height-1-jrow/2 : jrow/2;
for (jcol=0; jcol < jwide; jcol++) {
val = curve[*rp++];
if (cr2_slice[0]) {
jidx = jrow*jwide + jcol;
i = jidx / (cr2_slice[1]*jh.high);
if ((j = i >= cr2_slice[0]))
i = cr2_slice[0];
jidx -= i * (cr2_slice[1]*jh.high);
row = jidx / cr2_slice[1+j];
col = jidx % cr2_slice[1+j] + i*cr2_slice[1];
}
if (raw_width == 3984 && (col -= 2) < 0)
col += (row--,raw_width);
if(row>raw_height)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 3);
#endif
if ((unsigned) row < raw_height) RAW(row,col) = val;
if (++col >= raw_width)
col = (row++,0);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw;
}
#endif
ljpeg_end (&jh);
}
void CLASS canon_sraw_load_raw()
{
struct jhead jh;
short *rp=0, (*ip)[4];
int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c;
int v[3]={0,0,0}, ver, hue;
char *cp;
if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return;
jwide = (jh.wide >>= 1) * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (ecol=slice=0; slice <= cr2_slice[0]; slice++) {
scol = ecol;
ecol += cr2_slice[1] * 2 / jh.clrs;
if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2;
for (row=0; row < height; row += (jh.clrs >> 1) - 1) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
ip = (short (*)[4]) image + row*width;
for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) {
if ((jcol %= jwide) == 0)
rp = (short *) ljpeg_row (jrow++, &jh);
if (col >= width) continue;
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sraw_ycc>=2)
{
FORC (jh.clrs-2)
{
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192;
}
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else if(imgdata.params.sraw_ycc)
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else
#endif
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 16384;
ip[col][2] = rp[jcol+jh.clrs-1] - 16384;
}
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sraw_ycc>=2)
{
ljpeg_end (&jh);
maximum = 0x3fff;
return;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (cp=model2; *cp && !isdigit(*cp); cp++);
sscanf (cp, "%d.%d.%d", v, v+1, v+2);
ver = (v[0]*1000 + v[1])*1000 + v[2];
hue = (jh.sraw+1) << 2;
if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
hue = jh.sraw << 1;
ip = (short (*)[4]) image;
rp = ip[0];
for (row=0; row < height; row++, ip+=width) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (row & (jh.sraw >> 1))
for (col=0; col < width; col+=2)
for (c=1; c < 3; c++)
if (row == height-1)
ip[col][c] = ip[col-width][c];
else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1;
for (col=1; col < width; col+=2)
for (c=1; c < 3; c++)
if (col == width-1)
ip[col][c] = ip[col-1][c];
else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(!imgdata.params.sraw_ycc)
#endif
for ( ; rp < ip[0]; rp+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (unique_id == 0x80000218 ||
unique_id == 0x80000250 ||
unique_id == 0x80000261 ||
unique_id == 0x80000281 ||
unique_id == 0x80000287) {
rp[1] = (rp[1] << 2) + hue;
rp[2] = (rp[2] << 2) + hue;
pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14);
pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14);
pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14);
} else {
if (unique_id < 0x80000218) rp[0] -= 512;
pix[0] = rp[0] + rp[2];
pix[2] = rp[0] + rp[1];
pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12);
}
FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
ljpeg_end (&jh);
maximum = 0x3fff;
}
void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp)
{
int c;
if (is_raw == 2 && shot_select) (*rp)++;
if (raw_image) {
if (row < raw_height && col < raw_width)
RAW(row,col) = curve[**rp];
*rp += is_raw;
} else {
if (row < height && col < width)
FORC(tiff_samples)
image[row*width+col][c] = curve[(*rp)[c]];
*rp += tiff_samples;
}
if (is_raw == 2 && shot_select) (*rp)--;
}
void CLASS lossless_dng_load_raw()
{
unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col;
struct jhead jh;
ushort *rp;
while (trow < raw_height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
save = ftell(ifp);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
if (!ljpeg_start (&jh, 0)) break;
jwide = jh.wide;
if (filters) jwide *= jh.clrs;
jwide /= is_raw;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=col=jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
for (jcol=0; jcol < jwide; jcol++) {
adobe_copy_pixel (trow+row, tcol+col, &rp);
if (++col >= tile_width || col >= raw_width)
row += 1 + (col = 0);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
fseek (ifp, save+4, SEEK_SET);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
ljpeg_end (&jh);
}
}
void CLASS packed_dng_load_raw()
{
ushort *pixel, *rp;
int row, col;
pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel);
merror (pixel, "packed_dng_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (tiff_bps == 16)
read_shorts (pixel, raw_width * tiff_samples);
else {
getbits(-1);
for (col=0; col < raw_width * tiff_samples; col++)
pixel[col] = getbits(tiff_bps);
}
for (rp=pixel, col=0; col < raw_width; col++)
adobe_copy_pixel (row, col, &rp);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (pixel);
throw ;
}
#endif
free (pixel);
}
void CLASS pentax_load_raw()
{
ushort bit[2][15], huff[4097];
int dep, row, col, diff, c, i;
ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
dep = (get2() + 12) & 15;
fseek (ifp, 12, SEEK_CUR);
FORC(dep) bit[0][c] = get2();
FORC(dep) bit[1][c] = fgetc(ifp);
FORC(dep)
for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); )
huff[++i] = bit[1][c] << 8 | c;
huff[0] = 12;
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
}
void CLASS nikon_load_raw()
{
static const uchar nikon_tree[][32] = {
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */
5,4,3,6,2,7,1,0,8,9,11,10,12 },
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */
0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */
5,4,6,3,7,2,8,1,9,0,10,11,12 },
{ 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */
5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
{ 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */
8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
{ 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */
7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff;
fseek (ifp, meta_offset, SEEK_SET);
ver0 = fgetc(ifp);
ver1 = fgetc(ifp);
if (ver0 == 0x49 || ver1 == 0x58)
fseek (ifp, 2110, SEEK_CUR);
if (ver0 == 0x46) tree = 2;
if (tiff_bps == 14) tree += 3;
read_shorts (vpred[0], 4);
max = 1 << tiff_bps & 0x7fff;
if ((csize = get2()) > 1)
step = max / (csize-1);
if (ver0 == 0x44 && ver1 == 0x20 && step > 0) {
for (i=0; i < csize; i++)
curve[i*step] = get2();
for (i=0; i < max; i++)
curve[i] = ( curve[i-i%step]*(step-i%step) +
curve[i-i%step+step]*(i%step) ) / step;
fseek (ifp, meta_offset+562, SEEK_SET);
split = get2();
} else if (ver0 != 0x46 && csize <= 0x4001)
read_shorts (curve, max=csize);
while (curve[max-2] == curve[max-1]) max--;
huff = make_decoder (nikon_tree[tree]);
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (min=row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (split && row == split) {
free (huff);
huff = make_decoder (nikon_tree[tree+1]);
max += (min = 16) << 1;
}
for (col=0; col < raw_width; col++) {
i = gethuff(huff);
len = i & 15;
shl = i >> 4;
diff = ((getbits(len-shl) << 1) + 1) << shl >> 1;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - !shl;
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if ((ushort)(hpred[col & 1] + min) >= max) derror();
RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (huff);
throw;
}
#endif
free (huff);
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
int CLASS nikon_e995()
{
int i, histo[256];
const uchar often[] = { 0x00, 0x55, 0xaa, 0xff };
memset (histo, 0, sizeof histo);
fseek (ifp, -2000, SEEK_END);
for (i=0; i < 2000; i++)
histo[fgetc(ifp)]++;
for (i=0; i < 4; i++)
if (histo[often[i]] < 200)
return 0;
return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
int CLASS nikon_e2100()
{
uchar t[12];
int i;
fseek (ifp, 0, SEEK_SET);
for (i=0; i < 1024; i++) {
fread (t, 1, 12, ifp);
if (((t[2] & t[4] & t[7] & t[9]) >> 4
& t[1] & t[6] & t[8] & t[11] & 3) != 3)
return 0;
}
return 1;
}
void CLASS nikon_3700()
{
int bits, i;
uchar dp[24];
static const struct {
int bits;
char t_make[12], t_model[15];
} table[] = {
{ 0x00, "Pentax", "Optio 33WR" },
{ 0x03, "Nikon", "E3200" },
{ 0x32, "Nikon", "E3700" },
{ 0x33, "Olympus", "C740UZ" } };
fseek (ifp, 3072, SEEK_SET);
fread (dp, 1, 24, ifp);
bits = (dp[8] & 3) << 4 | (dp[20] & 3);
for (i=0; i < sizeof table / sizeof *table; i++)
if (bits == table[i].bits) {
strcpy (make, table[i].t_make );
strcpy (model, table[i].t_model);
}
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
int CLASS minolta_z2()
{
int i, nz;
char tail[424];
fseek (ifp, -sizeof tail, SEEK_END);
fread (tail, 1, sizeof tail, ifp);
for (nz=i=0; i < sizeof tail; i++)
if (tail[i]) nz++;
return nz > 20;
}
#line 1436 "dcraw/dcraw.c"
void CLASS ppm_thumb()
{
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) malloc (thumb_length);
merror (thumb, "ppm_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fread (thumb, 1, thumb_length, ifp);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS ppm16_thumb()
{
int i;
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) calloc (thumb_length, 2);
merror (thumb, "ppm16_thumb()");
read_shorts ((ushort *) thumb, thumb_length);
for (i=0; i < thumb_length; i++)
thumb[i] = ((ushort *) thumb)[i] >> 8;
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS layer_thumb()
{
int i, c;
char *thumb, map[][4] = { "012","102" };
colors = thumb_misc >> 5 & 7;
thumb_length = thumb_width*thumb_height;
thumb = (char *) calloc (colors, thumb_length);
merror (thumb, "layer_thumb()");
fprintf (ofp, "P%d\n%d %d\n255\n",
5 + (colors >> 1), thumb_width, thumb_height);
fread (thumb, thumb_length, colors, ifp);
for (i=0; i < thumb_length; i++)
FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
free (thumb);
}
void CLASS rollei_thumb()
{
unsigned i;
ushort *thumb;
thumb_length = thumb_width * thumb_height;
thumb = (ushort *) calloc (thumb_length, 2);
merror (thumb, "rollei_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
read_shorts (thumb, thumb_length);
for (i=0; i < thumb_length; i++) {
putc (thumb[i] << 3, ofp);
putc (thumb[i] >> 5 << 2, ofp);
putc (thumb[i] >> 11 << 3, ofp);
}
free (thumb);
}
void CLASS rollei_load_raw()
{
uchar pixel[10];
unsigned iten=0, isix, i, buffer=0, todo[16];
isix = raw_width * raw_height * 5 / 8;
while (fread (pixel, 1, 10, ifp) == 10) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (i=0; i < 10; i+=2) {
todo[i] = iten++;
todo[i+1] = pixel[i] << 8 | pixel[i+1];
buffer = pixel[i] >> 2 | buffer << 6;
}
for ( ; i < 16; i+=2) {
todo[i] = isix++;
todo[i+1] = buffer >> (14-i)*5;
}
for (i=0; i < 16; i+=2)
raw_image[todo[i]] = (todo[i+1] & 0x3ff);
}
maximum = 0x3ff;
}
int CLASS raw (unsigned row, unsigned col)
{
return (row < raw_height && col < raw_width) ? RAW(row,col) : 0;
}
void CLASS phase_one_flat_field (int is_float, int nc)
{
ushort head[8];
unsigned wide, y, x, c, rend, cend, row, col;
float *mrow, num, mult[4];
read_shorts (head, 8);
wide = head[2] / head[4];
mrow = (float *) calloc (nc*wide, sizeof *mrow);
merror (mrow, "phase_one_flat_field()");
for (y=0; y < head[3] / head[5]; y++) {
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2) {
num = is_float ? getreal(11) : get2()/32768.0;
if (y==0) mrow[c*wide+x] = num;
else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5];
}
if (y==0) continue;
rend = head[1] + y*head[5];
for (row = rend-head[5]; row < raw_height && row < rend; row++) {
for (x=1; x < wide; x++) {
for (c=0; c < nc; c+=2) {
mult[c] = mrow[c*wide+x-1];
mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4];
}
cend = head[0] + x*head[4];
for (col = cend-head[4]; col < raw_width && col < cend; col++) {
c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0;
if (!(c & 1)) {
c = RAW(row,col) * mult[c];
RAW(row,col) = LIM(c,0,65535);
}
for (c=0; c < nc; c+=2)
mult[c] += mult[c+1];
}
}
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2)
mrow[c*wide+x] += mrow[(c+1)*wide+x];
}
}
free (mrow);
}
void CLASS phase_one_correct()
{
unsigned entries, tag, data, save, col, row, type;
int len, i, j, k, cip, val[4], dev[4], sum, max;
int head[9], diff, mindiff=INT_MAX, off_412=0;
static const signed char dir[12][2] =
{ {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0},
{-2,-2}, {-2,2}, {2,-2}, {2,2} };
float poly[8], num, cfrac, frac, mult[2], *yval[2];
ushort *xval[2];
if (half_size || !meta_length) return;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Phase One correction...\n"));
#endif
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
while (entries--) {
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x419) { /* Polynomial curve */
for (get4(), i=0; i < 8; i++)
poly[i] = getreal(11);
poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
for (i=0; i < 0x10000; i++) {
num = (poly[5]*i + poly[3])*i + poly[1];
curve[i] = LIM(num,0,65535);
} goto apply; /* apply to right half */
} else if (tag == 0x41a) { /* Polynomial curve */
for (i=0; i < 4; i++)
poly[i] = getreal(11);
for (i=0; i < 0x10000; i++) {
for (num=0, j=4; j--; )
num = num * i + poly[j];
curve[i] = LIM(num+i,0,65535);
} apply: /* apply to whole image */
for (row=0; row < raw_height; row++)
for (col = (tag & 1)*ph1.split_col; col < raw_width; col++)
RAW(row,col) = curve[RAW(row,col)];
} else if (tag == 0x400) { /* Sensor defects */
while ((len -= 8) >= 0) {
col = get2();
row = get2();
type = get2(); get2();
if (col >= raw_width) continue;
if (type == 131) /* Bad column */
for (row=0; row < raw_height; row++)
if (FC(row-top_margin,col-left_margin) == 1) {
for (sum=i=0; i < 4; i++)
sum += val[i] = raw (row+dir[i][0], col+dir[i][1]);
for (max=i=0; i < 4; i++) {
dev[i] = abs((val[i] << 2) - sum);
if (dev[max] < dev[i]) max = i;
}
RAW(row,col) = (sum - val[max])/3.0 + 0.5;
} else {
for (sum=0, i=8; i < 12; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = 0.5 + sum * 0.0732233 +
(raw(row,col-2) + raw(row,col+2)) * 0.3535534;
}
else if (type == 129) { /* Bad pixel */
if (row >= raw_height) continue;
j = (FC(row-top_margin,col-left_margin) != 1) * 4;
for (sum=0, i=j; i < j+8; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = (sum + 4) >> 3;
}
}
} else if (tag == 0x401) { /* All-color flat fields */
phase_one_flat_field (1, 2);
} else if (tag == 0x416 || tag == 0x410) {
phase_one_flat_field (0, 2);
} else if (tag == 0x40b) { /* Red+blue flat field */
phase_one_flat_field (0, 4);
} else if (tag == 0x412) {
fseek (ifp, 36, SEEK_CUR);
diff = abs (get2() - ph1.tag_21a);
if (mindiff > diff) {
mindiff = diff;
off_412 = ftell(ifp) - 38;
}
}
fseek (ifp, save, SEEK_SET);
}
if (off_412) {
fseek (ifp, off_412, SEEK_SET);
for (i=0; i < 9; i++) head[i] = get4() & 0x7fff;
yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6);
merror (yval[0], "phase_one_correct()");
yval[1] = (float *) (yval[0] + head[1]*head[3]);
xval[0] = (ushort *) (yval[1] + head[2]*head[4]);
xval[1] = (ushort *) (xval[0] + head[1]*head[3]);
get2();
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
yval[i][j] = getreal(11);
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
xval[i][j] = get2();
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
cfrac = (float) col * head[3] / raw_width;
cfrac -= cip = cfrac;
num = RAW(row,col) * 0.5;
for (i=cip; i < cip+2; i++) {
for (k=j=0; j < head[1]; j++)
if (num < xval[0][k = head[1]*i+j]) break;
frac = (j == 0 || j == head[1]) ? 0 :
(xval[0][k] - num) / (xval[0][k] - xval[0][k-1]);
mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac);
}
i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2;
RAW(row,col) = LIM(i,0,65535);
}
free (yval[0]);
}
}
void CLASS phase_one_load_raw()
{
int a, b, i;
ushort akey, bkey, t_mask;
fseek (ifp, ph1.key_off, SEEK_SET);
akey = get2();
bkey = get2();
t_mask = ph1.format == 1 ? 0x5555:0x1354;
fseek (ifp, data_offset, SEEK_SET);
read_shorts (raw_image, raw_width*raw_height);
if (ph1.format)
for (i=0; i < raw_width*raw_height; i+=2) {
a = raw_image[i+0] ^ akey;
b = raw_image[i+1] ^ bkey;
raw_image[i+0] = (a & t_mask) | (b & ~t_mask);
raw_image[i+1] = (b & t_mask) | (a & ~t_mask);
}
}
unsigned CLASS ph1_bithuff (int nbits, ushort *huff)
{
#ifndef LIBRAW_NOTHREADS
#define bitbuf tls->ph1_bits.bitbuf
#define vbits tls->ph1_bits.vbits
#else
static UINT64 bitbuf=0;
static int vbits=0;
#endif
unsigned c;
if (nbits == -1)
return bitbuf = vbits = 0;
if (nbits == 0) return 0;
if (vbits < nbits) {
bitbuf = bitbuf << 32 | get4();
vbits += 32;
}
c = bitbuf << (64-vbits) >> (64-nbits);
if (huff) {
vbits -= huff[c] >> 8;
return (uchar) huff[c];
}
vbits -= nbits;
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#endif
}
#define ph1_bits(n) ph1_bithuff(n,0)
#define ph1_huff(h) ph1_bithuff(*h,h+1)
void CLASS phase_one_load_raw_c()
{
static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
int *offset, len[2], pred[2], row, col, i, j;
ushort *pixel;
short (*t_black)[2];
pixel = (ushort *) calloc (raw_width + raw_height*4, 2);
merror (pixel, "phase_one_load_raw_c()");
offset = (int *) (pixel + raw_width);
fseek (ifp, strip_offset, SEEK_SET);
for (row=0; row < raw_height; row++)
offset[row] = get4();
t_black = (short (*)[2]) offset + raw_height;
fseek (ifp, ph1.black_off, SEEK_SET);
if (ph1.black_off)
{
read_shorts ((ushort *) t_black[0], raw_height*2);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.rawdata.ph1_black = (short (*)[2])calloc(raw_height*2,sizeof(short));
merror (imgdata.rawdata.ph1_black, "phase_one_load_raw_c()");
memmove(imgdata.rawdata.ph1_black,(short *) t_black[0],raw_height*2*sizeof(short));
#endif
}
for (i=0; i < 256; i++)
curve[i] = i*i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, data_offset + offset[row], SEEK_SET);
ph1_bits(-1);
pred[0] = pred[1] = 0;
for (col=0; col < raw_width; col++) {
if (col >= (raw_width & -8))
len[0] = len[1] = 14;
else if ((col & 7) == 0)
for (i=0; i < 2; i++) {
for (j=0; j < 5 && !ph1_bits(1); j++);
if (j--) len[i] = length[j*2 + ph1_bits(1)];
}
if ((i = len[col & 1]) == 14)
pixel[col] = pred[col & 1] = ph1_bits(16);
else
pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
if (pred[col & 1] >> 16) derror();
if (ph1.format == 5 && pixel[col] < 256)
pixel[col] = curve[pixel[col]];
}
for (col=0; col < raw_width; col++) {
#ifndef LIBRAW_LIBRARY_BUILD
i = (pixel[col] << 2) - ph1.t_black + t_black[row][col >= ph1.split_col];
if (i > 0) RAW(row,col) = i;
#else
RAW(row,col) = pixel[col] << 2;
#endif
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = 0xfffc - ph1.t_black;
}
void CLASS hasselblad_load_raw()
{
struct jhead jh;
int row, col, pred[2], len[2], diff, c;
if (!ljpeg_start (&jh, 0)) return;
order = 0x4949;
ph1_bits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pred[0] = pred[1] = 0x8000 + load_flags;
for (col=0; col < raw_width; col+=2) {
FORC(2) len[c] = ph1_huff(jh.huff[0]);
FORC(2) {
diff = ph1_bits(len[c]);
if ((diff & (1 << (len[c]-1))) == 0)
diff -= (1 << len[c]) - 1;
if (diff == 65535) diff = -32768;
RAW(row,col+c) = pred[c] += diff;
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
ljpeg_end (&jh);
throw;
}
#endif
ljpeg_end (&jh);
maximum = 0xffff;
}
void CLASS leaf_hdr_load_raw()
{
ushort *pixel=0;
unsigned tile=0, r, c, row, col;
if (!filters) {
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "leaf_hdr_load_raw()");
}
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
FORC(tiff_samples)
for (r=0; r < raw_height; r++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (r % tile_length == 0) {
fseek (ifp, data_offset + 4*tile++, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
}
if (filters && c != shot_select) continue;
if (filters) pixel = raw_image + r*raw_width;
read_shorts (pixel, raw_width);
if (!filters && (row = r - top_margin) < height)
for (col=0; col < width; col++)
image[row*width+col][c] = pixel[col+left_margin];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
if(!filters) free(pixel);
throw;
}
#endif
if (!filters) {
maximum = 0xffff;
raw_color = 1;
free (pixel);
}
}
void CLASS unpacked_load_raw()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
read_shorts (raw_image, raw_width*raw_height);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
}
void CLASS sinar_4shot_load_raw()
{
ushort *pixel;
unsigned shot, row, col, r, c;
if ((shot = shot_select) || half_size) {
if (shot) shot--;
if (shot > 3) shot = 3;
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
unpacked_load_raw();
return;
}
#ifndef LIBRAW_LIBRARY_BUILD
free (raw_image);
raw_image = 0;
free (image);
image = (ushort (*)[4])
calloc ((iheight=height), (iwidth=width)*sizeof *image);
merror (image, "sinar_4shot_load_raw()");
#endif
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (shot=0; shot < 4; shot++) {
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
read_shorts (pixel, raw_width);
if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue;
for (col=0; col < raw_width; col++) {
if ((c = col-left_margin - (shot & 1)) >= width) continue;
image[r*width+c][FC(row,col)] = pixel[col];
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
shrink = filters = 0;
}
void CLASS imacon_full_load_raw()
{
int row, col;
if (!image) return;
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
read_shorts (image[row*width+col], 3);
}
}
void CLASS packed_load_raw()
{
int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i;
UINT64 bitbuf=0;
bwide = raw_width * tiff_bps / 8;
bwide += bwide & load_flags >> 7;
rbits = bwide * 8 - raw_width * tiff_bps;
if (load_flags & 1) bwide = bwide * 16 / 15;
bite = 8 + (load_flags & 24);
half = (raw_height+1) >> 1;
for (irow=0; irow < raw_height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
row = irow;
if (load_flags & 2 &&
(row = irow % half * 2 + irow / half) == 1 &&
load_flags & 4) {
if (vbits=0, tiff_compress)
fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET);
else {
fseek (ifp, 0, SEEK_END);
fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
}
}
for (col=0; col < raw_width; col++) {
for (vbits -= tiff_bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps);
RAW(row,col ^ (load_flags >> 6 & 1)) = val;
if (load_flags & 1 && (col % 10) == 9 &&
fgetc(ifp) && col < width+left_margin) derror();
}
vbits -= rbits;
}
}
void CLASS nokia_load_raw()
{
uchar *data, *dp;
int rev, dwide, row, col, c;
rev = 3 * (order == 0x4949);
dwide = (raw_width * 5 + 1) / 4;
data = (uchar *) malloc (dwide*2);
merror (data, "nokia_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data+dwide, 1, dwide, ifp) < dwide) derror();
FORC(dwide) data[c] = data[dwide+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
free (data);
throw;
}
#endif
free (data);
maximum = 0x3ff;
}
void CLASS canon_rmf_load_raw()
{
int row, col, bits, orow, ocol, c;
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width-2; col+=3) {
bits = get4();
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0) {
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = bits >> (10*c+2) & 0x3ff;
}
}
}
maximum = 0x3ff;
}
unsigned CLASS pana_bits (int nbits)
{
#ifndef LIBRAW_NOTHREADS
#define buf tls->pana_bits.buf
#define vbits tls->pana_bits.vbits
#else
static uchar buf[0x4000];
static int vbits;
#endif
int byte;
if (!nbits) return vbits=0;
if (!vbits) {
fread (buf+load_flags, 1, 0x4000-load_flags, ifp);
fread (buf, 1, load_flags, ifp);
}
vbits = (vbits - nbits) & 0x1ffff;
byte = vbits >> 3 ^ 0x3ff0;
return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits);
#ifndef LIBRAW_NOTHREADS
#undef buf
#undef vbits
#endif
}
void CLASS panasonic_load_raw()
{
int row, col, i, j, sh=0, pred[2], nonz[2];
pana_bits(0);
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
if ((i = col % 14) == 0)
pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2));
if (nonz[i & 1]) {
if ((j = pana_bits(8))) {
if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
pred[i & 1] &= ~((~0u) << sh);
pred[i & 1] += j << sh;
}
} else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
if ((RAW(row,col) = pred[col & 1]) > 4098 && col < width) derror();
}
}
}
void CLASS olympus_load_raw()
{
ushort huff[4096];
int row, col, nbits, sign, low, high, i, c, w, n, nw;
int acarry[2][3], *carry, pred, diff;
huff[n=0] = 0xc0c;
for (i=12; i--; )
FORC(2048 >> i) huff[++n] = (i+1) << 8 | i;
fseek (ifp, 7, SEEK_CUR);
getbits(-1);
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
memset (acarry, 0, sizeof acarry);
for (col=0; col < raw_width; col++) {
carry = acarry[col & 1];
i = 2 * (carry[2] < 3);
for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++);
low = (sign = getbits(3)) & 3;
sign = sign << 29 >> 31;
if ((high = getbithuff(12,huff)) == 12)
high = getbits(16-nbits) >> 1;
carry[0] = (high << nbits) | getbits(nbits);
diff = (carry[0] ^ sign) + carry[1];
carry[1] = (diff*3 + carry[1]) >> 5;
carry[2] = carry[0] > 16 ? 0 : carry[2]+1;
if (col >= width) continue;
if (row < 2 && col < 2) pred = 0;
else if (row < 2) pred = RAW(row,col-2);
else if (col < 2) pred = RAW(row-2,col);
else {
w = RAW(row,col-2);
n = RAW(row-2,col);
nw = RAW(row-2,col-2);
if ((w < nw && nw < n) || (n < nw && nw < w)) {
if (ABS(w-nw) > 32 || ABS(n-nw) > 32)
pred = w + n - nw;
else pred = (w + n) >> 1;
} else pred = ABS(w-nw) > ABS(n-nw) ? w : n;
}
if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror();
}
}
}
void CLASS minolta_rd175_load_raw()
{
uchar pixel[768];
unsigned irow, box, row, col;
for (irow=0; irow < 1481; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 768, ifp) < 768) derror();
box = irow / 82;
row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2);
switch (irow) {
case 1477: case 1479: continue;
case 1476: row = 984; break;
case 1480: row = 985; break;
case 1478: row = 985; box = 1;
}
if ((box < 12) && (box & 1)) {
for (col=0; col < 1533; col++, row ^= 1)
if (col != 1) RAW(row,col) = (col+1) & 2 ?
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1;
RAW(row,1) = pixel[1] << 1;
RAW(row,1533) = pixel[765] << 1;
} else
for (col=row & 1; col < 1534; col+=2)
RAW(row,col) = pixel[col/2] << 1;
}
maximum = 0xff << 1;
}
void CLASS quicktake_100_load_raw()
{
uchar pixel[484][644];
static const short gstep[16] =
{ -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 };
static const short rstep[6][4] =
{ { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 },
{ -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } };
static const short t_curve[256] =
{ 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,
54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,
79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116,
118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155,
158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195,
197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244,
248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322,
326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400,
405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479,
483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643,
654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844,
855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 };
int rb, row, col, sharp, val=0;
getbits(-1);
memset (pixel, 0x80, sizeof pixel);
for (row=2; row < height+2; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=2+(row & 1); col < width+2; col+=2) {
val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] +
pixel[row][col-2]) >> 2) + gstep[getbits(4)];
pixel[row][col] = val = LIM(val,0,255);
if (col < 4)
pixel[row][col-2] = pixel[row+1][~row & 1] = val;
if (row == 2)
pixel[row-1][col+1] = pixel[row-1][col+3] = val;
}
pixel[row][col] = val;
}
for (rb=0; rb < 2; rb++)
for (row=2+rb; row < height+2; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
if (row < 4 || col < 4) sharp = 2;
else {
val = ABS(pixel[row-2][col] - pixel[row][col-2])
+ ABS(pixel[row-2][col] - pixel[row-2][col-2])
+ ABS(pixel[row][col-2] - pixel[row-2][col-2]);
sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 :
val < 32 ? 3 : val < 48 ? 4 : 5;
}
val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1)
+ rstep[sharp][getbits(2)];
pixel[row][col] = val = LIM(val,0,255);
if (row < 4) pixel[row-2][col+2] = val;
if (col < 4) pixel[row+2][col-2] = val;
}
}
for (row=2; row < height+2; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
val = ((pixel[row][col-1] + (pixel[row][col] << 2) +
pixel[row][col+1]) >> 1) - 0x100;
pixel[row][col] = LIM(val,0,255);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = t_curve[pixel[row+2][col+2]];
}
maximum = 0x3ff;
}
#define radc_token(tree) ((signed char) getbithuff(8,huff[tree]))
#define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--)
#define PREDICTOR (c ? (buf[c][y-1][x] + buf[c][y][x+1]) / 2 \
: (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4)
#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
# pragma GCC optimize("no-aggressive-loop-optimizations")
# endif
#endif
void CLASS kodak_radc_load_raw()
{
static const char src[] = {
1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
1,0, 2,2, 2,-2,
1,-3, 1,3,
2,-17, 2,-5, 2,5, 2,17,
2,-7, 2,2, 2,9, 2,18,
2,-18, 2,-9, 2,-2, 2,7,
2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
static const ushort pt[] =
{ 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
for (i=2; i < 12; i+=2)
for (c=pt[i-2]; c <= pt[i]; c++)
curve[c] = (float)
(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
for (s=i=0; i < sizeof src; i+=2)
FORC(256 >> src[i])
huff[0][s++] = src[i] << 8 | (uchar) src[i+1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
getbits(-1);
for (i=0; i < sizeof(buf)/sizeof(short); i++)
buf[0][0][i] = 2048;
for (row=0; row < height; row+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC3 mul[c] = getbits(6);
FORC3 {
val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10:12;
x = ~((~0u) << (s-1));
val <<= 12-s;
for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
buf[c][0][i] = (buf[c][0][i] * val + x) >> s;
last[c] = mul[c];
for (r=0; r <= !c; r++) {
buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
for (tree=1, col=width/2; col > 0; ) {
if ((tree = radc_token(tree))) {
col -= 2;
if (tree == 8)
FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
} else
do {
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
col -= 2;
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1) {
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y=0; y < 2; y++)
for (x=0; x < width/2; x++) {
val = (buf[c][y+1][x] << 4) / mul[c];
if (val < 0) val = 0;
if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
else RAW(row+r*2+y,x*2+y) = val;
}
memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
}
}
for (y=row; y < row+4; y++)
for (x=0; x < width; x++)
if ((x+y) & 1) {
r = x ? x-1 : x+1;
s = x+1 < width ? x+1 : x-1;
val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
if (val < 0) val = 0;
RAW(y,x) = val;
}
}
for (i=0; i < height*width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS kodak_jpeg_load_raw() {}
#else
METHODDEF(boolean)
fill_input_buffer (j_decompress_ptr cinfo)
{
#ifndef LIBRAW_NOTHREADS
#define jpeg_buffer tls->jpeg_buffer
#else
static uchar jpeg_buffer[4096];
#endif
size_t nbytes;
nbytes = fread (jpeg_buffer, 1, 4096, ifp);
swab (jpeg_buffer, jpeg_buffer, nbytes);
cinfo->src->next_input_byte = jpeg_buffer;
cinfo->src->bytes_in_buffer = nbytes;
return TRUE;
#ifndef LIBRAW_NOTHREADS
#undef jpeg_buffer
#endif
}
void CLASS kodak_jpeg_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
int row, col;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
jpeg_stdio_src (&cinfo, ifp);
cinfo.src->fill_input_buffer = fill_input_buffer;
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 )) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname);
#endif
jpeg_destroy_decompress (&cinfo);
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_DECODE_JPEG;
#else
longjmp (failure, 3);
#endif
}
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (cinfo.output_scanline < cinfo.output_height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
throw;
}
#endif
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
maximum = 0xff << 1;
}
#endif
void CLASS lossy_dng_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
unsigned sorder=order, ntags, opcode, deg, i, j, c;
unsigned save=data_offset-4, trow=0, tcol=0, row, col;
ushort t_curve[3][256];
double coeff[9], tot;
fseek (ifp, meta_offset, SEEK_SET);
order = 0x4d4d;
ntags = get4();
while (ntags--) {
opcode = get4(); get4(); get4();
if (opcode != 8)
{ fseek (ifp, get4(), SEEK_CUR); continue; }
fseek (ifp, 20, SEEK_CUR);
if ((c = get4()) > 2) break;
fseek (ifp, 12, SEEK_CUR);
if ((deg = get4()) > 8) break;
for (i=0; i <= deg && i < 9; i++)
coeff[i] = getreal(12);
for (i=0; i < 256; i++) {
for (tot=j=0; j <= deg; j++)
tot += coeff[j] * pow(i/255.0, (int)j);
t_curve[c][i] = tot*0xffff;
}
}
order = sorder;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
while (trow < raw_height) {
fseek (ifp, save+=4, SEEK_SET);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
{
jpeg_destroy_decompress(&cinfo);
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
#else
jpeg_stdio_src (&cinfo, ifp);
#endif
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (cinfo.output_scanline < cinfo.output_height &&
(row = trow + cinfo.output_scanline) < height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < cinfo.output_width && tcol+col < width; col++) {
FORC3 image[row*width+tcol+col][c] = t_curve[c][pixel[col][c]];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
jpeg_destroy_decompress (&cinfo);
throw;
}
#endif
jpeg_abort_decompress (&cinfo);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
}
jpeg_destroy_decompress (&cinfo);
maximum = 0xffff;
}
#endif
void CLASS kodak_dc120_load_raw()
{
static const int mul[4] = { 162, 192, 187, 92 };
static const int add[4] = { 0, 636, 424, 212 };
uchar pixel[848];
int row, shift, col;
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 848, ifp) < 848) derror();
shift = row * mul[row & 3] + add[row & 3];
for (col=0; col < width; col++)
RAW(row,col) = (ushort) pixel[(col + shift) % 848];
}
maximum = 0xff;
}
void CLASS eight_bit_load_raw()
{
uchar *pixel;
unsigned row, col;
pixel = (uchar *) calloc (raw_width, sizeof *pixel);
merror (pixel, "eight_bit_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, raw_width, ifp) < raw_width) derror();
for (col=0; col < raw_width; col++)
RAW(row,col) = curve[pixel[col]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_yrgb_load_raw()
{
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel);
merror (pixel, "kodak_yrgb_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (~row & 1)
if (fread (pixel, raw_width, 3, ifp) < 3) derror();
for (col=0; col < raw_width; col++) {
y = pixel[width*2*(row & 1) + col];
cb = pixel[width + (col & -2)] - 128;
cr = pixel[width + (col & -2)+1] - 128;
rgb[1] = y-((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_262_load_raw()
{
static const uchar kodak_tree[2][26] =
{ { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 },
{ 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } };
ushort *huff[2];
uchar *pixel;
int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val;
FORC(2) huff[c] = make_decoder (kodak_tree[c]);
ns = (raw_height+63) >> 5;
pixel = (uchar *) malloc (raw_width*32 + ns*4);
merror (pixel, "kodak_262_load_raw()");
strip = (int *) (pixel + raw_width*32);
order = 0x4d4d;
FORC(ns) strip[c] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if ((row & 31) == 0) {
fseek (ifp, strip[row >> 5], SEEK_SET);
getbits(-1);
pi = 0;
}
for (col=0; col < raw_width; col++) {
chess = (row + col) & 1;
pi1 = chess ? pi-2 : pi-raw_width-1;
pi2 = chess ? pi-2*raw_width : pi-raw_width+1;
if (col <= chess) pi1 = -1;
if (pi1 < 0) pi1 = pi2;
if (pi2 < 0) pi2 = pi1;
if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2;
pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
pixel[pi] = val = pred + ljpeg_diff (huff[chess]);
if (val >> 8) derror();
val = curve[pixel[pi++]];
RAW(row,col) = val;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
FORC(2) free (huff[c]);
}
int CLASS kodak_65000_decode (short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf=0;
int save, bits=0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i=0; i < bsize; i+=2) {
c = fgetc(ifp);
if ((blen[i ] = c & 15) > 12 ||
(blen[i+1] = c >> 4) > 12 ) {
fseek (ifp, save, SEEK_SET);
for (i=0; i < bsize; i+=8) {
read_shorts (raw, 6);
out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j=0; j < 6; j++)
out[i+2+j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4) {
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i=0; i < bsize; i++) {
len = blen[i];
if (bits < len) {
for (j=0; j < 32; j+=8)
bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16-len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
void CLASS kodak_65000_load_raw()
{
short buf[256];
int row, col, len, pred[2], ret, i;
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
pred[0] = pred[1] = 0;
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len);
for (i=0; i < len; i++)
if ((RAW(row,col+i) = curve[ret ? buf[i] :
(pred[i & 1] += buf[i])]) >> 12) derror();
}
}
}
void CLASS kodak_ycbcr_load_raw()
{
short buf[384], *bp;
int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
ushort *ip;
if (!image) return;
for (row=0; row < height; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=128) {
len = MIN (128, width-col);
kodak_65000_decode (buf, len*3);
y[0][1] = y[1][1] = cb = cr = 0;
for (bp=buf, i=0; i < len; i+=2, bp+=2) {
cb += bp[4];
cr += bp[5];
rgb[1] = -((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
for (j=0; j < 2; j++)
for (k=0; k < 2; k++) {
if ((y[j][k] = y[j][k^1] + *bp++) >> 10) derror();
ip = image[(row+j)*width + col+i+k];
FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)];
}
}
}
}
}
void CLASS kodak_rgb_load_raw()
{
short buf[768], *bp;
int row, col, len, c, i, rgb[3];
ushort *ip=image[0];
#ifndef LIBRAW_LIBRARY_BUILD
if (raw_image) free (raw_image);
raw_image = 0;
#endif
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
len = MIN (256, width-col);
kodak_65000_decode (buf, len*3);
memset (rgb, 0, sizeof rgb);
for (bp=buf, i=0; i < len; i++, ip+=4)
FORC3 if ((ip[c] = rgb[c] += *bp++) >> 12) derror();
}
}
}
void CLASS kodak_thumb_load_raw()
{
int row, col;
colors = thumb_misc >> 5;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
read_shorts (image[row*width+col], colors);
maximum = (1 << (thumb_misc & 31)) - 1;
}
void CLASS sony_decrypt (unsigned *data, int len, int start, int key)
{
#ifndef LIBRAW_NOTHREADS
#define pad tls->sony_decrypt.pad
#define p tls->sony_decrypt.p
#else
static unsigned pad[128], p;
#endif
if (start) {
for (p=0; p < 4; p++)
pad[p] = key = key * 48828125 + 1;
pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31;
for (p=4; p < 127; p++)
pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31;
for (p=0; p < 127; p++)
pad[p] = htonl(pad[p]);
}
#if 1 // Avoid gcc 4.8 bug
while (len--)
{
*data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
p++;
}
#else
while (len--)
*data++ ^= pad[p++ & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
#endif
#ifndef LIBRAW_NOTHREADS
#undef pad
#undef p
#endif
}
void CLASS sony_load_raw()
{
uchar head[40];
ushort *pixel;
unsigned i, key, row, col;
fseek (ifp, 200896, SEEK_SET);
fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
order = 0x4d4d;
key = get4();
fseek (ifp, 164600, SEEK_SET);
fread (head, 1, 40, ifp);
sony_decrypt ((unsigned int *) head, 10, 1, key);
for (i=26; i-- > 22; )
key = key << 8 | head[i];
fseek (ifp, data_offset, SEEK_SET);
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key);
for (col=0; col < raw_width; col++)
if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
}
maximum = 0x3ff0;
}
void CLASS sony_arw_load_raw()
{
ushort huff[32768];
static const ushort tab[18] =
{ 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809,
0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 };
int i, c, n, col, row, len, diff, sum=0;
for (n=i=0; i < 18; i++)
FORC(32768 >> (tab[i] >> 8)) huff[n++] = tab[i];
getbits(-1);
for (col = raw_width; col--; )
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (row=0; row < raw_height+1; row+=2) {
if (row == raw_height) row = 1;
len = getbithuff(15,huff);
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if ((sum += diff) >> 12) derror();
if (row < height) RAW(row,col) = sum;
}
}
}
void CLASS sony_arw2_load_raw()
{
uchar *data, *dp;
ushort pix[16];
int row, col, val, max, min, imax, imin, sh, bit, i;
data = (uchar *) malloc (raw_width);
merror (data, "sony_arw2_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fread (data, 1, raw_width, ifp);
for (dp=data, col=0; col < raw_width-30; dp+=16) {
max = 0x7ff & (val = sget4(dp));
min = 0x7ff & val >> 11;
imax = 0x0f & val >> 22;
imin = 0x0f & val >> 26;
for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++);
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sony_arw2_hack)
{
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1];
}
else
{
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
}
#else
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
#endif
col -= col & 1 ? 1:31;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (data);
throw;
}
#endif
free (data);
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.sony_arw2_hack)
{
black <<= 2;
maximum <<=2;
}
#endif
}
void CLASS samsung_load_raw()
{
int row, col, c, i, dir, op[4], len[4];
order = 0x4949;
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, strip_offset+row*4, SEEK_SET);
fseek (ifp, data_offset+get4(), SEEK_SET);
ph1_bits(-1);
FORC4 len[c] = row < 2 ? 7:4;
for (col=0; col < raw_width; col+=16) {
dir = ph1_bits(1);
FORC4 op[c] = ph1_bits(2);
FORC4 switch (op[c]) {
case 3: len[c] = ph1_bits(4); break;
case 2: len[c]--; break;
case 1: len[c]++;
}
for (c=0; c < 16; c+=2) {
i = len[((c & 1) << 1) | (c >> 3)];
RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) +
(dir ? RAW(row+(~c | -2),col+c) : col ? RAW(row,col+(c | -2)) : 128);
if (c == 14) c = -1;
}
}
}
}
#define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
void CLASS smal_decode_segment (unsigned seg[2][2], int holes)
{
uchar hist[3][13] = {
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 3, 3, 0, 0, 63, 47, 31, 15, 0 } };
int low, high=0xff, carry=0, nbits=8;
int pix, s, count, bin, next, i, sym[3];
uchar diff, pred[]={0,0};
ushort data=0, range=0;
fseek (ifp, seg[0][1]+1, SEEK_SET);
getbits(-1);
for (pix=seg[0][0]; pix < seg[1][0]; pix++) {
for (s=0; s < 3; s++) {
data = data << nbits | getbits(nbits);
if (carry < 0)
carry = (nbits += carry+1) < 1 ? nbits-1 : 0;
while (--nbits >= 0)
if ((data >> nbits & 0xff) == 0xff) break;
if (nbits > 0)
data = ((data & ((1 << (nbits-1)) - 1)) << 1) |
((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits));
if (nbits >= 0) {
data += getbits(1);
carry = nbits - 8;
}
count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4);
for (bin=0; hist[s][bin+5] > count; bin++);
low = hist[s][bin+5] * (high >> 4) >> 2;
if (bin) high = hist[s][bin+4] * (high >> 4) >> 2;
high -= low;
for (nbits=0; high << nbits < 128; nbits++);
range = (range+low) << nbits;
high <<= nbits;
next = hist[s][1];
if (++hist[s][2] > hist[s][3]) {
next = (next+1) & hist[s][0];
hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2;
hist[s][2] = 1;
}
if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) {
if (bin < hist[s][1])
for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--;
else if (next <= bin)
for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++;
}
hist[s][1] = next;
sym[s] = bin;
}
diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
if (sym[0] & 4)
diff = diff ? -diff : 0x80;
if (ftell(ifp) + 12 >= seg[1][1])
diff = 0;
raw_image[pix] = pred[pix & 1] += diff;
if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
}
maximum = 0xff;
}
void CLASS smal_v6_load_raw()
{
unsigned seg[2][2];
fseek (ifp, 16, SEEK_SET);
seg[0][0] = 0;
seg[0][1] = get2();
seg[1][0] = raw_width * raw_height;
seg[1][1] = INT_MAX;
smal_decode_segment (seg, 0);
}
int CLASS median4 (int *p)
{
int min, max, sum, i;
min = max = sum = p[0];
for (i=1; i < 4; i++) {
sum += p[i];
if (min > p[i]) min = p[i];
if (max < p[i]) max = p[i];
}
return (sum - min - max) >> 1;
}
void CLASS fill_holes (int holes)
{
int row, col, val[4];
for (row=2; row < height-2; row++) {
if (!HOLE(row)) continue;
for (col=1; col < width-1; col+=4) {
val[0] = RAW(row-1,col-1);
val[1] = RAW(row-1,col+1);
val[2] = RAW(row+1,col-1);
val[3] = RAW(row+1,col+1);
RAW(row,col) = median4(val);
}
for (col=2; col < width-2; col+=4)
if (HOLE(row-2) || HOLE(row+2))
RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
else {
val[0] = RAW(row,col-2);
val[1] = RAW(row,col+2);
val[2] = RAW(row-2,col);
val[3] = RAW(row+2,col);
RAW(row,col) = median4(val);
}
}
}
void CLASS smal_v9_load_raw()
{
unsigned seg[256][2], offset, nseg, holes, i;
fseek (ifp, 67, SEEK_SET);
offset = get4();
nseg = fgetc(ifp);
fseek (ifp, offset, SEEK_SET);
for (i=0; i < nseg*2; i++)
seg[0][i] = get4() + data_offset*(i & 1);
fseek (ifp, 78, SEEK_SET);
holes = fgetc(ifp);
fseek (ifp, 88, SEEK_SET);
seg[nseg][0] = raw_height * raw_width;
seg[nseg][1] = get4() + data_offset;
for (i=0; i < nseg; i++)
smal_decode_segment (seg+i, holes);
if (holes) fill_holes (holes);
}
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
int c, row, col;
jas_stream_t *in;
jas_image_t *jimg;
jas_matrix_t *jmat;
jas_seqent_t *data;
ushort *img, *pix;
jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
in = jas_stream_fopen (ifname, "rb");
#else
in = (jas_stream_t*)ifp->make_jas_stream();
if(!in)
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
jas_stream_seek (in, data_offset+20, SEEK_SET);
jimg = jas_image_decode (in, -1, 0);
#ifndef LIBRAW_LIBRARY_BUILD
if (!jimg) longjmp (failure, 3);
#else
if(!jimg)
{
jas_stream_close (in);
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
}
#endif
jmat = jas_matrix_create (height/2, width/2);
merror (jmat, "redcine_load_raw()");
img = (ushort *) calloc ((height+2), (width+2)*2);
merror (img, "redcine_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
bool fastexitflag = false;
try {
#endif
FORC4 {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat);
data = jas_matrix_getref (jmat, 0, 0);
for (row = c >> 1; row < height; row+=2)
for (col = c & 1; col < width; col+=2)
img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2];
}
for (col=1; col <= width; col++) {
img[col] = img[2*(width+2)+col];
img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col];
}
for (row=0; row < height+2; row++) {
img[row*(width+2)] = img[row*(width+2)+2];
img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3];
}
for (row=1; row <= height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1));
for ( ; col <= width; col+=2, pix+=2) {
c = (((pix[0] - 0x800) << 3) +
pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2;
pix[0] = LIM(c,0,4095);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
fastexitflag=true;
}
#endif
free (img);
jas_matrix_destroy (jmat);
jas_image_destroy (jimg);
jas_stream_close (in);
#ifdef LIBRAW_LIBRARY_BUILD
if(fastexitflag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
#endif
}
#line 3983 "dcraw/dcraw.c"
void CLASS crop_masked_pixels()
{
int row, col;
unsigned
#ifndef LIBRAW_LIBRARY_BUILD
r, raw_pitch = raw_width*2,
c, m, mblack[8], zero, val;
#else
c, m, zero, val;
#define mblack imgdata.color.black_stat
#endif
#ifndef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS phase_one_load_raw ||
load_raw == &CLASS phase_one_load_raw_c)
phase_one_correct();
if (fuji_width) {
for (row=0; row < raw_height-top_margin*2; row++) {
for (col=0; col < fuji_width << !fuji_layout; col++) {
if (fuji_layout) {
r = fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
if (r < height && c < width)
BAYER(r,c) = RAW(row+top_margin,col+left_margin);
}
}
} else {
for (row=0; row < height; row++)
for (col=0; col < width; col++)
BAYER2(row,col) = RAW(row+top_margin,col+left_margin);
}
#endif
if (mask[0][3]) goto mask_set;
if (load_raw == &CLASS canon_load_raw ||
load_raw == &CLASS lossless_jpeg_load_raw) {
mask[0][1] = mask[1][1] = 2;
mask[0][3] = -2;
goto sides;
}
if (load_raw == &CLASS canon_600_load_raw ||
load_raw == &CLASS sony_load_raw ||
(load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) ||
load_raw == &CLASS kodak_262_load_raw ||
(load_raw == &CLASS packed_load_raw && (load_flags & 32))) {
sides:
mask[0][0] = mask[1][0] = top_margin;
mask[0][2] = mask[1][2] = top_margin+height;
mask[0][3] += left_margin;
mask[1][1] += left_margin+width;
mask[1][3] += raw_width;
}
if (load_raw == &CLASS nokia_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
mask_set:
memset (mblack, 0, sizeof mblack);
for (zero=m=0; m < 8; m++)
for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
c = FC(row-top_margin,col-left_margin);
mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)];
mblack[4+c]++;
zero += !val;
}
if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
(mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
canon_600_correct();
#endif
} else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7])
FORC4 cblack[c] = mblack[c] / mblack[4+c];
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif
void CLASS remove_zeroes()
{
unsigned row, col, tot, n, r, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
for (row=0; row < height; row++)
for (col=0; col < width; col++)
if (BAYER(row,col) == 0) {
tot = n = 0;
for (r = row-2; r <= row+2; r++)
for (c = col-2; c <= col+2; c++)
if (r < height && c < width &&
FC(r,c) == FC(row,col) && BAYER(r,c))
tot += (n++,BAYER(r,c));
if (n) BAYER(row,col) = tot/n;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
#line 4254 "dcraw/dcraw.c"
void CLASS gamma_curve (double pwr, double ts, int mode, int imax)
{
int i;
double g[6], bnd[2]={0,0}, r;
g[0] = pwr;
g[1] = ts;
g[2] = g[3] = g[4] = 0;
bnd[g[1] >= 1] = 1;
if (g[1] && (g[1]-1)*(g[0]-1) <= 0) {
for (i=0; i < 48; i++) {
g[2] = (bnd[0] + bnd[1])/2;
if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2];
else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2];
}
g[3] = g[2] / g[1];
if (g[0]) g[4] = g[2] * (1/g[0] - 1);
}
if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) +
(1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1;
else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1
- g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1;
if (!mode--) {
memcpy (gamm, g, sizeof gamm);
return;
}
for (i=0; i < 0x10000; i++) {
curve[i] = 0xffff;
if ((r = (double) i / imax) < 1)
curve[i] = 0x10000 * ( mode
? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1))
: (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
}
}
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
double work[3][6], num;
int i, j, k;
for (i=0; i < 3; i++) {
for (j=0; j < 6; j++)
work[i][j] = j == i+3;
for (j=0; j < 3; j++)
for (k=0; k < size; k++)
work[i][j] += in[k][i] * in[k][j];
}
for (i=0; i < 3; i++) {
num = work[i][i];
for (j=0; j < 6; j++)
work[i][j] /= num;
for (k=0; k < 3; k++) {
if (k==i) continue;
num = work[k][i];
for (j=0; j < 6; j++)
work[k][j] -= work[i][j] * num;
}
}
for (i=0; i < size; i++)
for (j=0; j < 3; j++)
for (out[i][j]=k=0; k < 3; k++)
out[i][j] += work[j][k+3] * in[i][k];
}
void CLASS cam_xyz_coeff (double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
if(num > 0.00001)
{
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
pre_mul[i] = 1 / num;
}
else
{
for (j=0; j < 3; j++)
cam_rgb[i][j] = 0.0;
pre_mul[i] = 1.0;
}
}
pseudoinverse (cam_rgb, inverse, colors);
for (raw_color = i=0; i < 3; i++)
for (j=0; j < colors; j++)
rgb_cam[i][j] = inverse[j][i];
}
#ifdef COLORCHECK
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
int cut[NSQ][4]; // you must set these
// ColorChecker Chart under 6500-kelvin illumination
static const double gmb_xyY[NSQ][3] = {
{ 0.400, 0.350, 10.1 }, // Dark Skin
{ 0.377, 0.345, 35.8 }, // Light Skin
{ 0.247, 0.251, 19.3 }, // Blue Sky
{ 0.337, 0.422, 13.3 }, // Foliage
{ 0.265, 0.240, 24.3 }, // Blue Flower
{ 0.261, 0.343, 43.1 }, // Bluish Green
{ 0.506, 0.407, 30.1 }, // Orange
{ 0.211, 0.175, 12.0 }, // Purplish Blue
{ 0.453, 0.306, 19.8 }, // Moderate Red
{ 0.285, 0.202, 6.6 }, // Purple
{ 0.380, 0.489, 44.3 }, // Yellow Green
{ 0.473, 0.438, 43.1 }, // Orange Yellow
{ 0.187, 0.129, 6.1 }, // Blue
{ 0.305, 0.478, 23.4 }, // Green
{ 0.539, 0.313, 12.0 }, // Red
{ 0.448, 0.470, 59.1 }, // Yellow
{ 0.364, 0.233, 19.8 }, // Magenta
{ 0.196, 0.252, 19.8 }, // Cyan
{ 0.310, 0.316, 90.0 }, // White
{ 0.310, 0.316, 59.1 }, // Neutral 8
{ 0.310, 0.316, 36.2 }, // Neutral 6.5
{ 0.310, 0.316, 19.8 }, // Neutral 5
{ 0.310, 0.316, 9.0 }, // Neutral 3.5
{ 0.310, 0.316, 3.1 } }; // Black
double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
double inverse[NSQ][3], cam_xyz[4][3], num;
int c, i, j, k, sq, row, col, count[4];
memset (gmb_cam, 0, sizeof gmb_cam);
for (sq=0; sq < NSQ; sq++) {
FORCC count[c] = 0;
for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
c = FC(row,col);
if (c >= colors) c -= 2;
gmb_cam[sq][c] += BAYER(row,col);
count[c]++;
}
FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
gmb_xyz[sq][1] = gmb_xyY[sq][2];
gmb_xyz[sq][2] = gmb_xyY[sq][2] *
(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
}
pseudoinverse (gmb_xyz, inverse, NSQ);
for (i=0; i < colors; i++)
for (j=0; j < 3; j++)
for (cam_xyz[i][j] = k=0; k < NSQ; k++)
cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
cam_xyz_coeff (cam_xyz);
if (verbose) {
printf (" { \"%s %s\", %d,\n\t{", make, model, black);
num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
FORCC for (j=0; j < 3; j++)
printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
puts (" } },");
}
#undef NSQ
}
#endif
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
int i;
for (i=0; i < sc; i++)
temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)];
for (; i+sc < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)];
for (; i < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))];
}
#if !defined(LIBRAW_USE_OPENMP)
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
FORC(nc) { /* denoise R,G1,B,G3 individually */
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++) {
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
{
temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
FORC(nc) { /* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
free(temp);
} /* end omp parallel */
/* the following loops are hard to parallize, no idea yes,
* problem is wlast which is carrying dependency
* second part should be easyer, but did not yet get it right.
*/
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++){
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#endif
// green equilibration
void CLASS green_matching()
{
int i,j;
double m1,m2,c1,c2;
int o1_1,o1_2,o1_3,o1_4;
int o2_1,o2_2,o2_3,o2_4;
ushort (*img)[4];
const int margin = 3;
int oj = 2, oi = 2;
float f;
const float thr = 0.01f;
if(half_size || shrink) return;
if(FC(oj, oi) != 3) oj++;
if(FC(oj, oi) != 3) oi++;
if(FC(oj, oi) != 3) oj--;
img = (ushort (*)[4]) calloc (height*width, sizeof *image);
merror (img, "green_matching()");
memcpy(img,image,height*width*sizeof *image);
for(j=oj;j<height-margin;j+=2)
for(i=oi;i<width-margin;i+=2){
o1_1=img[(j-1)*width+i-1][1];
o1_2=img[(j-1)*width+i+1][1];
o1_3=img[(j+1)*width+i-1][1];
o1_4=img[(j+1)*width+i+1][1];
o2_1=img[(j-2)*width+i][3];
o2_2=img[(j+2)*width+i][3];
o2_3=img[j*width+i-2][3];
o2_4=img[j*width+i+2][3];
m1=(o1_1+o1_2+o1_3+o1_4)/4.0;
m2=(o2_1+o2_2+o2_3+o2_4)/4.0;
c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0;
c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0;
if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr))
{
f = image[j*width+i][3]*m1/m2;
image[j*width+i][3]=f>0xffff?0xffff:f;
}
}
free(img);
}
void CLASS scale_colors()
{
unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
int val, dark, sat;
double dsum[8], dmin, dmax;
float scale_mul[4], fr, fc;
ushort *img=0, *pix;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2);
#endif
if (user_mul[0])
memcpy (pre_mul, user_mul, sizeof pre_mul);
if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
memset (dsum, 0, sizeof dsum);
bottom = MIN (greybox[1]+greybox[3], height);
right = MIN (greybox[0]+greybox[2], width);
for (row=greybox[1]; row < bottom; row += 8)
for (col=greybox[0]; col < right; col += 8) {
memset (sum, 0, sizeof sum);
for (y=row; y < row+8 && y < bottom; y++)
for (x=col; x < col+8 && x < right; x++)
FORC4 {
if (filters) {
c = fcol(y,x);
val = BAYER2(y,x);
} else
val = image[y*width+x][c];
if (val > maximum-25) goto skip_block;
if ((val -= cblack[c]) < 0) val = 0;
sum[c] += val;
sum[c+4]++;
if (filters) break;
}
FORC(8) dsum[c] += sum[c];
skip_block: ;
}
FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
}
if (use_camera_wb && cam_mul[0] != -1) {
memset (sum, 0, sizeof sum);
for (row=0; row < 8; row++)
for (col=0; col < 8; col++) {
c = FC(row,col);
if ((val = white[row][col] - cblack[c]) > 0)
sum[c] += val;
sum[c+4]++;
}
if (sum[0] && sum[1] && sum[2] && sum[3])
FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
else if (cam_mul[0] && cam_mul[2])
memcpy (pre_mul, cam_mul, sizeof pre_mul);
else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
#endif
}
}
if (pre_mul[1] == 0) pre_mul[1] = 1;
if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
dark = black;
sat = maximum;
if (threshold) wavelet_denoise();
maximum -= black;
for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
if (dmin > pre_mul[c])
dmin = pre_mul[c];
if (dmax < pre_mul[c])
dmax = pre_mul[c];
}
if (!highlight) dmax = dmin;
FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
if (verbose) {
fprintf (stderr,
_("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
FORC4 fprintf (stderr, " %f", pre_mul[c]);
fputc ('\n', stderr);
}
#endif
size = iheight*iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
scale_colors_loop(scale_mul);
#else
for (i=0; i < size*4; i++) {
val = image[0][i];
if (!val) continue;
val -= cblack[i & 3];
val *= scale_mul[i & 3];
image[0][i] = CLIP(val);
}
#endif
if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Correcting chromatic aberration...\n"));
#endif
for (c=0; c < 4; c+=2) {
if (aber[c] == 1) continue;
img = (ushort *) malloc (size * sizeof *img);
merror (img, "scale_colors()");
for (i=0; i < size; i++)
img[i] = image[i][c];
for (row=0; row < iheight; row++) {
ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
if (ur > iheight-2) continue;
fr -= ur;
for (col=0; col < iwidth; col++) {
uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
if (uc > iwidth-2) continue;
fc -= uc;
pix = img + ur*iwidth + uc;
image[row*iwidth+col][c] =
(pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) +
(pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
}
}
free(img);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2);
#endif
}
void CLASS pre_interpolate()
{
ushort (*img)[4];
int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2);
#endif
if (shrink) {
if (half_size) {
height = iheight;
width = iwidth;
if (filters == 9) {
for (row=0; row < 3; row++)
for (col=1; col < 4; col++)
if (!(image[row*width+col][0] | image[row*width+col][2]))
goto break2; break2:
for ( ; row < height; row+=3)
for (col=(col-1)%3+1; col < width-1; col+=3) {
img = image + row*width+col;
for (c=0; c < 3; c+=2)
img[0][c] = (img[-1][c] + img[1][c]) >> 1;
}
}
} else {
img = (ushort (*)[4]) calloc (height, width*sizeof *img);
merror (img, "pre_interpolate()");
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
c = fcol(row,col);
img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
}
free (image);
image = img;
shrink = 0;
}
}
if (filters > 1000 && colors == 3) {
mix_green = four_color_rgb ^ half_size;
if (four_color_rgb | half_size) colors++;
else {
for (row = FC(1,0) >> 1; row < height; row+=2)
for (col = FC(row,1) & 1; col < width; col+=2)
image[row*width+col][1] = image[row*width+col][3];
filters &= ~((filters & 0x55555555) << 1);
}
}
if (half_size) filters = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2);
#endif
}
void CLASS border_interpolate (int border)
{
unsigned row, col, y, x, f, c, sum[8];
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
if (col==border && row >= border && row < height-border)
col = width-border;
memset (sum, 0, sizeof sum);
for (y=row-1; y != row+2; y++)
for (x=col-1; x != col+2; x++)
if (y < height && x < width) {
f = fcol(y,x);
sum[f] += image[y*width+x][f];
sum[f+4]++;
}
f = fcol(row,col);
FORCC if (c != f && sum[c+4])
image[row*width+col][c] = sum[c] / sum[c+4];
}
}
void CLASS lin_interpolate_loop(int code[16][16][32],int size)
{
int row;
for (row=1; row < height-1; row++)
{
int col,*ip;
ushort *pix;
for (col=1; col < width-1; col++) {
int i;
int sum[4];
pix = image[row*width+col];
ip = code[row % size][col % size];
memset (sum, 0, sizeof sum);
for (i=*ip++; i--; ip+=3)
sum[ip[2]] += pix[ip[0]] << ip[1];
for (i=colors; --i; ip+=2)
pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
}
}
}
void CLASS lin_interpolate()
{
int code[16][16][32], size=16, *ip, sum[4];
int f, c, x, y, row, col, shift, color;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#endif
if (filters == 9) size = 6;
border_interpolate(1);
for (row=0; row < size; row++)
for (col=0; col < size; col++) {
ip = code[row][col]+1;
f = fcol(row,col);
memset (sum, 0, sizeof sum);
for (y=-1; y <= 1; y++)
for (x=-1; x <= 1; x++) {
shift = (y==0) + (x==0);
color = fcol(row+y,col+x);
if (color == f) continue;
*ip++ = (width*y + x)*4 + color;
*ip++ = shift;
*ip++ = color;
sum[color] += 1 << shift;
}
code[row][col][0] = (ip - code[row][col]) / 3;
FORCC
if (c != f) {
*ip++ = c;
*ip++ = sum[c]>0?256 / sum[c]:0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#endif
lin_interpolate_loop(code,size);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#endif
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
void CLASS vng_interpolate()
{
static const signed char *cp, terms[] = {
-2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
-2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
-2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
-2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
-2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
-1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
-1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40,
-1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
-1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
-1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
-1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
-1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
-1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
+0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40,
+0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
+0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
+0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
+0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
+0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
+0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80,
+1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
+1,+0,+2,+1,0,0x10
}, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
ushort (*brow[5])[4], *pix;
int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
int g, diff, thold, num, c;
lin_interpolate();
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
#endif
if (filters == 1) prow = pcol = 16;
if (filters == 9) prow = pcol = 6;
ip = (int *) calloc (prow*pcol, 1280);
merror (ip, "vng_interpolate()");
for (row=0; row < prow; row++) /* Precalculate for VNG */
for (col=0; col < pcol; col++) {
code[row][col] = ip;
for (cp=terms, t=0; t < 64; t++) {
y1 = *cp++; x1 = *cp++;
y2 = *cp++; x2 = *cp++;
weight = *cp++;
grads = *cp++;
color = fcol(row+y1,col+x1);
if (fcol(row+y2,col+x2) != color) continue;
diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
*ip++ = (y1*width + x1)*4 + color;
*ip++ = (y2*width + x2)*4 + color;
*ip++ = weight;
for (g=0; g < 8; g++)
if (grads & 1<<g) *ip++ = g;
*ip++ = -1;
}
*ip++ = INT_MAX;
for (cp=chood, g=0; g < 8; g++) {
y = *cp++; x = *cp++;
*ip++ = (y*width + x) * 4;
color = fcol(row,col);
if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
*ip++ = (y*width + x) * 8 + color;
else
*ip++ = 0;
}
}
brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
merror (brow[4], "vng_interpolate()");
for (row=0; row < 3; row++)
brow[row] = brow[4] + row*width;
for (row=2; row < height-2; row++) { /* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1);
#endif
for (col=2; col < width-2; col++) {
pix = image[row*width+col];
ip = code[row % prow][col % pcol];
memset (gval, 0, sizeof gval);
while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */
diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
gval[ip[3]] += diff;
ip += 5;
if ((g = ip[-1]) == -1) continue;
gval[g] += diff;
while ((g = *ip++) != -1)
gval[g] += diff;
}
ip++;
gmin = gmax = gval[0]; /* Choose a threshold */
for (g=1; g < 8; g++) {
if (gmin > gval[g]) gmin = gval[g];
if (gmax < gval[g]) gmax = gval[g];
}
if (gmax == 0) {
memcpy (brow[2][col], pix, sizeof *image);
continue;
}
thold = gmin + (gmax >> 1);
memset (sum, 0, sizeof sum);
color = fcol(row,col);
for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */
if (gval[g] <= thold) {
FORCC
if (c == color && ip[1])
sum[c] += (pix[c] + pix[ip[1]]) >> 1;
else
sum[c] += pix[ip[0] + c];
num++;
}
}
FORCC { /* Save to buffer */
t = pix[color];
if (c != color)
t += (sum[c] - sum[color]) / num;
brow[2][col][c] = CLIP(t);
}
}
if (row > 3) /* Write buffer to image */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
for (g=0; g < 4; g++)
brow[(g-1) & 3] = brow[g];
}
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
free (brow[4]);
free (code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
void CLASS ppg_interpolate()
{
int dir[5] = { 1, width, -1, -width, 1 };
int row, col, diff[2], guess[2], c, d, i;
ushort (*pix)[4];
border_interpolate(3);
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#endif
/* Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=3; row < height-3; row++)
for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; i++) {
guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
- pix[-2*d][c] - pix[2*d][c];
diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
ABS(pix[ 2*d][c] - pix[ 0][c]) +
ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
( ABS(pix[ 3*d][1] - pix[ d][1]) +
ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
}
d = dir[i = diff[0] > diff[1]];
pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
}
/* Calculate red and blue for each green pixel: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; c=2-c, i++)
pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1]) >> 1);
}
/* Calculate blue for red pixels and vice versa: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
diff[i] = ABS(pix[-d][c] - pix[d][c]) +
ABS(pix[-d][1] - pix[0][1]) +
ABS(pix[ d][1] - pix[0][1]);
guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1];
}
if (diff[0] != diff[1])
pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
else
pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
}
}
void CLASS cielab (ushort rgb[3], short lab[3])
{
int c, i, j, k;
float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
if (!rgb) {
#ifndef LIBRAW_NOTHREADS
if(cbrt[0] < -1.0f)
#endif
for (i=0; i < 0x10000; i++) {
r = i / 65535.0;
cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f;
}
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (xyz_cam[i][j] = k=0; k < 3; k++)
xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
return;
}
xyz[0] = xyz[1] = xyz[2] = 0.5;
FORCC {
xyz[0] += xyz_cam[0][c] * rgb[c];
xyz[1] += xyz_cam[1][c] * rgb[c];
xyz[2] += xyz_cam[2][c] * rgb[c];
}
xyz[0] = cbrt[CLIP((int) xyz[0])];
xyz[1] = cbrt[CLIP((int) xyz[1])];
xyz[2] = cbrt[CLIP((int) xyz[2])];
lab[0] = 64 * (116 * xyz[1] - 16);
lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}
#define TS 512 /* Tile Size */
#define fcol(row,col) xtrans[(row+top_margin+6)%6][(col+left_margin+6)%6]
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
void CLASS xtrans_interpolate (int passes)
{
int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
int val, ndir, pass, hm[8], avg[4], color[3][8];
static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
dir[4] = { 1,TS,TS+1,TS-1 };
short allhex[3][3][2][8], *hex;
ushort min, max, sgrow, sgcol;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab) [TS][3], (*lix)[3];
float (*drv)[TS][TS], diff[6], tr;
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif
cielab (0,0);
border_interpolate(6);
ndir = 4 << (passes > 1);
buffer = (char *) malloc (TS*TS*(ndir*11+6));
merror (buffer, "xtrans_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6));
drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6));
homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6));
/* Map a green hexagon around each non-green pixel and vice versa: */
for (row=0; row < 3; row++)
for (col=0; col < 3; col++)
for (ng=d=0; d < 10; d+=2) {
g = fcol(row,col) == 1;
if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
if (ng == 4) { sgrow = row; sgcol = col; }
if (ng == g+1) FORC(8) {
v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
allhex[row][col][0][c^(g*2 & d)] = h + v*width;
allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
}
}
/* Set green1 and green3 to the minimum and maximum allowed values: */
for (row=2; row < height-2; row++)
for (min=~(max=0), col=2; col < width-2; col++) {
if (fcol(row,col) == 1 && (min=~(max=0))) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
if (!max) FORC(6) {
val = pix[hex[c]][1];
if (min > val) min = val;
if (max < val) max = val;
}
pix[0][1] = min;
pix[0][3] = max;
switch ((row-sgrow) % 3) {
case 1: if (row < height-3) { row++; col--; } break;
case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--;
}
}
for (top=3; top < height-19; top += TS-16)
for (left=3; left < width-19; left += TS-16) {
mrow = MIN (top+TS, height-3);
mcol = MIN (left+TS, width-3);
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++)
memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);
/* Interpolate green horizontally, vertically, and along both diagonals: */
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) -
46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 +
92 * (pix[ 0 ][f] - pix[ -hex[2]][f]);
FORC(2) color[1][2+c] =
164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
}
for (pass=0; pass < passes; pass++) {
if (pass == 1)
memcpy (rgb+=4, buffer, 4*sizeof *rgb);
/* Recalculate green from interpolated values of closer pixels: */
if (pass) {
for (row=top+2; row < mrow-2; row++)
for (col=left+2; col < mcol-2; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][1];
for (d=3; d < 6; d++) {
rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
- rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
}
}
}
/* Interpolate red and blue values for solitary green pixels: */
for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
rix = &rgb[0][row-top][col-left];
h = fcol(row,col+1);
memset (diff, 0, sizeof diff);
for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
for (c=0; c < 2; c++, h^=2) {
g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
if (d > 1)
diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
}
if (d > 1 && (d & 1))
if (diff[d-1] < diff[d])
FORC(2) color[c*2][d] = color[c*2][d-1];
if (d < 2 || (d & 1)) {
FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
rix += TS*TS;
}
}
}
/* Interpolate red for blue pixels and vice versa: */
for (row=top+1; row < mrow-1; row++)
for (col=left+1; col < mcol-1; col++) {
if ((f = 2-fcol(row,col)) == 1) continue;
rix = &rgb[0][row-top][col-left];
i = (row-sgrow) % 3 ? TS:1;
for (d=0; d < 4; d++, rix += TS*TS)
rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
}
/* Fill in red and blue for 2x2 blocks of green: */
for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
rix = &rgb[0][row-top][col-left];
hex = allhex[row % 3][col % 3][1];
for (d=0; d < ndir; d+=2, rix += TS*TS)
if (hex[d] + hex[d+1]) {
g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
} else {
g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
}
}
}
rgb = (ushort(*)[TS][TS][3]) buffer;
mrow -= top;
mcol -= left;
/* Convert to CIELab and differentiate in all directions: */
for (d=0; d < ndir; d++) {
for (row=2; row < mrow-2; row++)
for (col=2; col < mcol-2; col++)
cielab (rgb[d][row][col], lab[row][col]);
for (f=dir[d & 3],row=3; row < mrow-3; row++)
for (col=3; col < mcol-3; col++) {
lix = &lab[row][col];
g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
drv[d][row][col] = SQR(g)
+ SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
+ SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
}
}
/* Build homogeneity maps from the derivatives: */
memset(homo, 0, ndir*TS*TS);
for (row=4; row < mrow-4; row++)
for (col=4; col < mcol-4; col++) {
for (tr=FLT_MAX, d=0; d < ndir; d++)
if (tr > drv[d][row][col])
tr = drv[d][row][col];
tr *= 8;
for (d=0; d < ndir; d++)
for (v=-1; v <= 1; v++)
for (h=-1; h <= 1; h++)
if (drv[d][row+v][col+h] <= tr)
homo[d][row][col]++;
}
/* Average the most homogenous pixels for the final result: */
if (height-top < TS+4) mrow = height-top+2;
if (width-left < TS+4) mcol = width-left+2;
for (row = MIN(top,8); row < mrow-8; row++)
for (col = MIN(left,8); col < mcol-8; col++) {
for (d=0; d < ndir; d++)
for (hm[d]=0, v=-2; v <= 2; v++)
for (h=-2; h <= 2; h++)
hm[d] += homo[d][row+v][col+h];
for (d=0; d < ndir-4; d++)
if (hm[d] < hm[d+4]) hm[d ] = 0; else
if (hm[d] > hm[d+4]) hm[d+4] = 0;
for (max=hm[0],d=1; d < ndir; d++)
if (max < hm[d]) max = hm[d];
max -= max >> 3;
memset (avg, 0, sizeof avg);
for (d=0; d < ndir; d++)
if (hm[d] >= max) {
FORC3 avg[c] += rgb[d][row][col][c];
avg[3]++;
}
FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
}
}
free(buffer);
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
int row, col;
int c, val;
ushort (*pix)[4];
const int rowlimit = MIN(top+TS, height-2);
const int collimit = MIN(left+TS, width-2);
for (row = top; row < rowlimit; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < collimit; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
}
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3])
{
unsigned row, col;
int c, val;
ushort (*pix)[4];
ushort (*rix)[3];
short (*lix)[3];
float xyz[3];
const unsigned num_pix_per_row = 4*width;
const unsigned rowlimit = MIN(top+TS-1, height-3);
const unsigned collimit = MIN(left+TS-1, width-3);
ushort *pix_above;
ushort *pix_below;
int t1, t2;
for (row = top+1; row < rowlimit; row++) {
pix = image + row*width + left;
rix = &inout_rgb[row-top][0];
lix = &out_lab[row-top][0];
for (col = left+1; col < collimit; col++) {
pix++;
pix_above = &pix[0][0] - num_pix_per_row;
pix_below = &pix[0][0] + num_pix_per_row;
rix++;
lix++;
c = 2 - FC(row, col);
if (c == 1) {
c = FC(row+1,col);
t1 = 2-c;
val = pix[0][1] + (( pix[-1][t1] + pix[1][t1]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][t1] = CLIP(val);
val = pix[0][1] + (( pix_above[c] + pix_below[c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else {
t1 = -4+c; /* -4+c: pixel of color c to the left */
t2 = 4+c; /* 4+c: pixel of color c to the right */
val = rix[0][1] + (( pix_above[t1] + pix_above[t2]
+ pix_below[t1] + pix_below[t2]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
}
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab(rix[0],lix[0]);
}
}
}
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3])
{
int direction;
for (direction = 0; direction < 2; direction++) {
ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]);
}
}
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int direction;
int i;
short (*lix)[3];
short (*lixs[2])[3];
short *adjacent_lix;
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
static const int dir[4] = { -1, 1, -TS, TS };
const int rowlimit = MIN(top+TS-2, height-4);
const int collimit = MIN(left+TS-2, width-4);
int homogeneity;
char (*homogeneity_map_p)[2];
memset (out_homogeneity_map, 0, 2*TS*TS);
for (row=top+2; row < rowlimit; row++) {
tr = row-top;
homogeneity_map_p = &out_homogeneity_map[tr][1];
for (direction=0; direction < 2; direction++) {
lixs[direction] = &lab[direction][tr][1];
}
for (col=left+2; col < collimit; col++) {
tc = col-left;
homogeneity_map_p++;
for (direction=0; direction < 2; direction++) {
lix = ++lixs[direction];
for (i=0; i < 4; i++) {
adjacent_lix = lix[dir[i]];
ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]);
abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1])
+ SQR(lix[0][2]-adjacent_lix[2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (direction=0; direction < 2; direction++) {
homogeneity = 0;
for (i=0; i < 4; i++) {
if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) {
homogeneity++;
}
}
homogeneity_map_p[0][direction] = homogeneity;
}
}
}
}
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int i, j;
int direction;
int hm[2];
int c;
const int rowlimit = MIN(top+TS-3, height-5);
const int collimit = MIN(left+TS-3, width-5);
ushort (*pix)[4];
ushort (*rix[2])[3];
for (row=top+3; row < rowlimit; row++) {
tr = row-top;
pix = &image[row*width+left+2];
for (direction = 0; direction < 2; direction++) {
rix[direction] = &rgb[direction][tr][2];
}
for (col=left+3; col < collimit; col++) {
tc = col-left;
pix++;
for (direction = 0; direction < 2; direction++) {
rix[direction]++;
}
for (direction=0; direction < 2; direction++) {
hm[direction] = 0;
for (i=tr-1; i <= tr+1; i++) {
for (j=tc-1; j <= tc+1; j++) {
hm[direction] += homogeneity_map[i][j][direction];
}
}
}
if (hm[0] != hm[1]) {
memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
} else {
FORC3 {
pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1;
}
}
}
}
}
void CLASS ahd_interpolate()
{
int i, j, k, top, left;
float xyz_cam[3][4],r;
char *buffer;
ushort (*rgb)[TS][TS][3];
short (*lab)[TS][TS][3];
char (*homo)[TS][2];
int terminate_flag = 0;
cielab(0,0);
border_interpolate(5);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag)
#endif
#endif
{
buffer = (char *) malloc (26*TS*TS); /* 1664 kB */
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][2]) (buffer + 24*TS*TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
for (top=2; top < height-5; top += TS-6){
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
if(0== omp_get_thread_num())
#endif
if(callbacks.progress_cb) {
int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7);
if(rr)
terminate_flag = 1;
}
#endif
for (left=2; !terminate_flag && (left < width-5); left += TS-6) {
ahd_interpolate_green_h_and_v(top, left, rgb);
ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
}
}
free (buffer);
}
#ifdef LIBRAW_LIBRARY_BUILD
if(terminate_flag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
void CLASS ahd_interpolate()
{
int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
static const int dir[4] = { -1, 1, -TS, TS };
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab)[TS][TS][3], (*lix)[3];
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#endif
cielab (0,0);
border_interpolate(5);
buffer = (char *) malloc (26*TS*TS);
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][TS]) (buffer + 24*TS*TS);
for (top=2; top < height-5; top += TS-6)
for (left=2; left < width-5; left += TS-6) {
/* Interpolate green horizontally and vertically: */
for (row=top; row < top+TS && row < height-2; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
/* Interpolate red and blue, and convert to CIELab: */
for (d=0; d < 2; d++)
for (row=top+1; row < top+TS-1 && row < height-3; row++)
for (col=left+1; col < left+TS-1 && col < width-3; col++) {
pix = image + row*width+col;
rix = &rgb[d][row-top][col-left];
lix = &lab[d][row-top][col-left];
if ((c = 2 - FC(row,col)) == 1) {
c = FC(row+1,col);
val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][2-c] = CLIP(val);
val = pix[0][1] + (( pix[-width][c] + pix[width][c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else
val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c]
+ pix[+width-1][c] + pix[+width+1][c]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab (rix[0],lix[0]);
}
/* Build homogeneity maps from the CIELab images: */
memset (homo, 0, 2*TS*TS);
for (row=top+2; row < top+TS-2 && row < height-4; row++) {
tr = row-top;
for (col=left+2; col < left+TS-2 && col < width-4; col++) {
tc = col-left;
for (d=0; d < 2; d++) {
lix = &lab[d][tr][tc];
for (i=0; i < 4; i++) {
ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]);
abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1])
+ SQR(lix[0][2]-lix[dir[i]][2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (d=0; d < 2; d++)
for (i=0; i < 4; i++)
if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
homo[d][tr][tc]++;
}
}
/* Combine the most homogenous pixels for the final result: */
for (row=top+3; row < top+TS-3 && row < height-5; row++) {
tr = row-top;
for (col=left+3; col < left+TS-3 && col < width-5; col++) {
tc = col-left;
for (d=0; d < 2; d++)
for (hm[d]=0, i=tr-1; i <= tr+1; i++)
for (j=tc-1; j <= tc+1; j++)
hm[d] += homo[d][i][j];
if (hm[0] != hm[1])
FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
else
FORC3 image[row*width+col][c] =
(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
}
}
}
free (buffer);
}
#endif
#undef TS
void CLASS median_filter()
{
ushort (*pix)[4];
int pass, c, i, j, k, med[9];
static const uchar opt[] = /* Optimal 9-element median search */
{ 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8,
0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 };
for (pass=1; pass <= med_passes; pass++) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes);
#endif
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Median filter pass %d...\n"), pass);
#endif
for (c=0; c < 3; c+=2) {
for (pix = image; pix < image+width*height; pix++)
pix[0][3] = pix[0][c];
for (pix = image+width; pix < image+width*(height-1); pix++) {
if ((pix-image+1) % width < 2) continue;
for (k=0, i = -width; i <= width; i += width)
for (j = i-1; j <= i+1; j++)
med[k++] = pix[j][3] - pix[j][1];
for (i=0; i < sizeof opt; i+=2)
if (med[opt[i]] > med[opt[i+1]])
SWAP (med[opt[i]] , med[opt[i+1]]);
pix[0][c] = CLIP(med[4] + pix[0][1]);
}
}
}
}
void CLASS blend_highlights()
{
int clip=INT_MAX, row, col, c, i, j;
static const float trans[2][4][4] =
{ { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
static const float itrans[2][4][4] =
{ { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
float cam[2][4], lab[2][4], sum[2], chratio;
if ((unsigned) (colors-3) > 1) return;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2);
#endif
FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
FORCC if (image[row*width+col][c] > clip) break;
if (c == colors) continue;
FORCC {
cam[0][c] = image[row*width+col][c];
cam[1][c] = MIN(cam[0][c],clip);
}
for (i=0; i < 2; i++) {
FORCC for (lab[i][c]=j=0; j < colors; j++)
lab[i][c] += trans[colors-3][c][j] * cam[i][j];
for (sum[i]=0,c=1; c < colors; c++)
sum[i] += SQR(lab[i][c]);
}
chratio = sqrt(sum[1]/sum[0]);
for (c=1; c < colors; c++)
lab[0][c] *= chratio;
FORCC for (cam[0][c]=j=0; j < colors; j++)
cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
FORCC image[row*width+col][c] = cam[0][c] / colors;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2);
#endif
}
#define SCALE (4 >> shrink)
void CLASS recover_highlights()
{
float *map, sum, wgt, grow;
int hsat[4], count, spread, change, val, i;
unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
ushort *pixel;
static const signed char dir[8][2] =
{ {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Rebuilding highlights...\n"));
#endif
grow = pow (2.0, 4-highlight);
FORCC hsat[c] = 32000 * pre_mul[c];
for (kc=0, c=1; c < colors; c++)
if (pre_mul[kc] < pre_mul[c]) kc = c;
high = height / SCALE;
wide = width / SCALE;
map = (float *) calloc (high, wide*sizeof *map);
merror (map, "recover_highlights()");
FORCC if (c != kc) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1);
#endif
memset (map, 0, high*wide*sizeof *map);
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
sum = wgt = count = 0;
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) {
sum += pixel[c];
wgt += pixel[kc];
count++;
}
}
if (count == SCALE*SCALE)
map[mrow*wide+mcol] = sum / wgt;
}
for (spread = 32/grow; spread--; ) {
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
if (map[mrow*wide+mcol]) continue;
sum = count = 0;
for (d=0; d < 8; d++) {
y = mrow + dir[d][0];
x = mcol + dir[d][1];
if (y < high && x < wide && map[y*wide+x] > 0) {
sum += (1 + (d & 1)) * map[y*wide+x];
count += 1 + (d & 1);
}
}
if (count > 3)
map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
}
for (change=i=0; i < high*wide; i++)
if (map[i] < 0) {
map[i] = -map[i];
change = 1;
}
if (!change) break;
}
for (i=0; i < high*wide; i++)
if (map[i] == 0) map[i] = 1;
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] > 1) {
val = pixel[kc] * map[mrow*wide+mcol];
if (pixel[c] < val) pixel[c] = CLIP(val);
}
}
}
}
free (map);
}
#undef SCALE
void CLASS tiff_get (unsigned base,
unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
*tag = get2();
*type = get2();
*len = get4();
*save = ftell(ifp) + 4;
if (*len * ("11124811248488"[*type < 14 ? *type:0]-'0') > 4)
fseek (ifp, get4()+base, SEEK_SET);
}
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
unsigned entries, tag, type, len, save;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == toff) thumb_offset = get4()+base;
if (tag == tlen) thumb_length = get4();
fseek (ifp, save, SEEK_SET);
}
}
#line 5968 "dcraw/dcraw.c"
void CLASS parse_makernote (int base, int uptag)
{
static const uchar xlat[2][256] = {
{ 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d,
0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d,
0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f,
0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f,
0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1,
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17,
0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89,
0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f,
0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b,
0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb,
0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3,
0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f,
0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35,
0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43,
0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5,
0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 },
{ 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c,
0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34,
0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad,
0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05,
0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee,
0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d,
0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b,
0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b,
0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc,
0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33,
0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8,
0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6,
0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c,
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49,
0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb,
0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } };
unsigned offset=0, entries, tag, type, len, save, c;
unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0};
uchar buf97[324], ci, cj, ck;
short morder, sorder=order;
char buf[10];
unsigned SamsungKey[11];
static const double rgb_adobe[3][3] = // inv(sRGB2XYZ_D65) * AdobeRGB2XYZ_D65
{{ 1.398283396477404, -0.398283116703571, 4.427165001263944E-08},
{-1.233904514232401E-07, 0.999999995196570, 3.126724276714121e-08},
{ 4.561487232726535E-08, -0.042938290466635, 1.042938250416105 }};
float adobe_cam [3][3];
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strcmp(make,"Nokia")) return;
fread (buf, 1, 10, ifp);
if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */
!strncmp (buf,"VER" ,3) ||
!strncmp (buf,"IIII",4) ||
!strncmp (buf,"MMMM",4)) return;
if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */
!strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i=ftell(ifp)) < data_offset && i < 16384) {
wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 &&
wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp (buf,"Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek (ifp, offset-8, SEEK_CUR);
} else if (!strcmp (buf,"OLYMPUS")) {
base = ftell(ifp)-10;
fseek (ifp, -2, SEEK_CUR);
order = get2(); get2();
} else if (!strncmp (buf,"SONY",4) ||
!strcmp (buf,"Panasonic")) {
goto nf;
} else if (!strncmp (buf,"FUJIFILM",8)) {
base = ftell(ifp)-10;
nf: order = 0x4949;
fseek (ifp, 2, SEEK_CUR);
} else if (!strcmp (buf,"OLYMP") ||
!strcmp (buf,"LEICA") ||
!strcmp (buf,"Ricoh") ||
!strcmp (buf,"EPSON"))
fseek (ifp, -2, SEEK_CUR);
else if (!strcmp (buf,"AOC") ||
!strcmp (buf,"QVC"))
fseek (ifp, -4, SEEK_CUR);
else {
fseek (ifp, -10, SEEK_CUR);
if (!strncmp(make,"SAMSUNG",7))
base = ftell(ifp);
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get (base, &tag, &type, &len, &save);
tag |= uptag << 16;
if (tag == 2 && strstr(make,"NIKON") && !iso_speed)
iso_speed = (get2(),get2());
if (tag == 37 && strstr(make,"NIKON") && !iso_speed)
{
unsigned char cc;
fread(&cc,1,1,ifp);
iso_speed = int(100.0 * pow(2.0,double(cc)/12.0-5.0));
}
if (tag == 4 && len > 26 && len < 35) {
if ((i=(get4(),get2())) != 0x7fff && !iso_speed)
iso_speed = 50 * pow (2.0, i/32.0 - 4);
if ((i=(get2(),get2())) != 0x7fff && !aperture)
aperture = pow (2.0, i/64.0);
if ((i=get2()) != 0xffff && !shutter)
shutter = pow (2.0, (short) i/-32.0);
wbi = (get2(),get2());
shot_order = (get2(),get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) {
fseek (ifp, tag == 4 ? 140:160, SEEK_CUR);
switch (get2()) {
case 72: flip = 0; break;
case 76: flip = 6; break;
case 82: flip = 5; break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets (model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strcmp(make,"Canon"))
fread (artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa) {
for (c=i=2; (ushort) c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i+=4) < len-5)
if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3)
flip = "065"[c]-'0';
}
if (tag == 0x10 && type == 4)
unique_id = get4();
if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) {
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
}
if (tag == 0x14 && type == 7) {
if (len == 2560) {
fseek (ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread (buf, 1, 10, ifp);
if (!strncmp(buf,"NRW ",4)) {
fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread (model, 64, 1, ifp);
if (strstr(make,"PENTAX")) {
if (tag == 0x1b) tag = 0x1018;
if (tag == 0x1c) tag = 0x1017;
}
if (tag == 0x1d)
while ((c = fgetc(ifp)) && c != EOF)
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
if (tag == 0x81 && type == 4) {
data_offset = get4();
fseek (ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if (tag == 0x29 && type == 1) {
c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0;
fseek (ifp, 8 + c*32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
if ((tag == 0x81 && type == 7) ||
(tag == 0x100 && type == 7) ||
(tag == 0x280 && type == 1)) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97) {
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
switch (ver97) {
case 100:
fseek (ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek (ifp, 6, SEEK_CUR);
goto get2_rggb;
case 103:
fseek (ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200) {
if (ver97 != 205) fseek (ifp, 280, SEEK_CUR);
fread (buf97, 324, 1, ifp);
}
}
if (tag == 0xa1 && type == 7) {
order = 0x4949;
fseek (ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3) {
fseek (ifp, wbi*48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7 && (unsigned) (ver97-200) < 17) {
ci = xlat[0][serial & 0xff];
cj = xlat[1][fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp)];
ck = 0x60;
for (i=0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97-200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] =
sget2 (buf97 + (i & -2) + c*2);
}
if(tag == 0xb001 && type == 3)
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(),get4());
if (tag == 0x200 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4)
goto get2_rggb;
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
if (tag == 0x03d && strstr(make,"NIKON") && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0xe01) { /* Nikon Capture Note */
order = 0x4949;
fseek (ifp, 22, SEEK_CUR);
for (offset=22; offset+22 < len; offset += 22+i) {
tag = get4();
fseek (ifp, 14, SEEK_CUR);
i = get4()-4;
if (tag == 0x76a43207) flip = get2();
else fseek (ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7) {
fseek (ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7) {
if (len == 614)
fseek (ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek (ifp, 148, SEEK_CUR);
else goto next;
goto get2_256;
}
if ((tag == 0x1011 && len == 9) || tag == 0x20400200)
{
if(!strcasecmp(make,"Olympus"))
{
int j,k;
for (i=0; i < 3; i++)
FORC3 adobe_cam[i][c] = ((short) get2()) / 256.0;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_adobe[i][k] * adobe_cam[k][j];
}
else
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2) {
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && type == 4)
fseek (ifp, get4()+base, SEEK_SET);
if (tag == 0x2020)
parse_thumb_note (base, 257, 258);
if (tag == 0x2040)
parse_makernote (base, 0x2040);
if (tag == 0xb028) {
fseek (ifp, get4()+base, SEEK_SET);
parse_thumb_note (base, 136, 137);
}
if (tag == 0x4001 && len > 500) {
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek (ifp, i, SEEK_CUR);
get2_rggb:
FORC4 cam_mul[c ^ (c >> 1)] = get2();
i = len >> 3 == 164 ? 112:22;
fseek (ifp, i, SEEK_CUR);
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
}
if(!strcasecmp(make,"Samsung"))
{
if (tag == 0xa020) // get the full Samsung encryption key
for (i=0; i<11; i++) SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
if (tag == 0xa030 && len == 9) // get and decode Samsung color matrix
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = (short)((get4() + SamsungKey[i*3+c]))/256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Somebody else use 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
Since the TIFF DateTime string has no timezone information,
assume that the camera's clock was set to Universal Time.
*/
void CLASS get_timestamp (int reversed)
{
struct tm t;
char str[20];
int i;
str[19] = 0;
if (reversed)
for (i=19; i--; ) str[i] = fgetc(ifp);
else
fread (str, 19, 1, ifp);
memset (&t, 0, sizeof t);
if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
return;
t.tm_year -= 1900;
t.tm_mon -= 1;
t.tm_isdst = -1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
void CLASS parse_exif (int base)
{
unsigned kodak, entries, tag, type, len, save, c;
double expo;
kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 33434: shutter = getreal(type); break;
case 33437: aperture = getreal(type); break;
case 34855: iso_speed = get2(); break;
case 36867:
case 36868: get_timestamp(0); break;
case 37377: if ((expo = -getreal(type)) < 128)
shutter = pow (2.0, expo); break;
case 37378: aperture = pow (2.0, getreal(type)/2); break;
case 37386: focal_len = getreal(type); break;
case 37500: parse_makernote (base, 0); break;
case 40962: if (kodak) raw_width = get4(); break;
case 40963: if (kodak) raw_height = get4(); break;
case 41730:
if (get4() == 0x20002)
for (exif_cfa=c=0; c < 8; c+=2)
exif_cfa |= fgetc(ifp) * 0x01010101 << c;
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_gps (int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 1: case 3: case 5:
gpsdata[29+tag/2] = getc(ifp); break;
case 2: case 4: case 7:
FORC(6) gpsdata[tag/3*6+c] = get4(); break;
case 6:
FORC(2) gpsdata[18+c] = get4(); break;
case 18: case 29:
fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS romm_coeff (float romm_cam[3][3])
{
static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */
{ { 2.034193, -0.727420, -0.306766 },
{ -0.228811, 1.231729, -0.002922 },
{ -0.008565, -0.153273, 1.161839 } };
int i, j, k;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
}
void CLASS parse_mos (int offset)
{
char data[40];
int skip, from, i, c, neut[4], planes=0, frot=0;
static const char *mod[] =
{ "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
"Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
"Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
"","","","","","","","","","","","","","","","","","AFi-II 12" };
float romm_cam[3][3];
fseek (ifp, offset, SEEK_SET);
while (1) {
if (get4() != 0x504b5453) break;
get4();
fread (data, 1, 40, ifp);
skip = get4();
from = ftell(ifp);
if (!strcmp(data,"JPEG_preview_data")) {
thumb_offset = from;
thumb_length = skip;
}
if (!strcmp(data,"icc_camera_profile")) {
profile_offset = from;
profile_length = skip;
}
if (!strcmp(data,"ShootObj_back_type")) {
fscanf (ifp, "%d", &i);
if ((unsigned) i < sizeof mod / sizeof (*mod))
strcpy (model, mod[i]);
}
if (!strcmp(data,"icc_camera_to_tone_matrix")) {
for (i=0; i < 9; i++)
romm_cam[0][i] = int_to_float(get4());
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_color_matrix")) {
for (i=0; i < 9; i++)
fscanf (ifp, "%f", &romm_cam[0][i]);
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_number_of_planes"))
fscanf (ifp, "%d", &planes);
if (!strcmp(data,"CaptProf_raw_data_rotation"))
fscanf (ifp, "%d", &flip);
if (!strcmp(data,"CaptProf_mosaic_pattern"))
FORC4 {
fscanf (ifp, "%d", &i);
if (i == 1) frot = c ^ (c >> 1);
}
if (!strcmp(data,"ImgProf_rotation_angle")) {
fscanf (ifp, "%d", &i);
flip = i - flip;
}
if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
FORC4 fscanf (ifp, "%d", neut+c);
FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
}
if (!strcmp(data,"Rows_data"))
load_flags = get4();
parse_mos (from);
fseek (ifp, skip+from, SEEK_SET);
}
if (planes)
filters = (planes == 1) * 0x01010101 *
(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}
void CLASS linear_table (unsigned len)
{
int i;
if (len > 0x1000) len = 0x1000;
read_shorts (curve, len);
for (i=len; i < 0x1000; i++)
curve[i] = curve[i-1];
maximum = curve[0xfff];
}
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2, wbtemp=6500;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
entries = get2();
if (entries > 1024) return;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / get2();
wbi = -2;
}
if (tag == 2118) wbtemp = getint(type);
if (tag == 2130 + wbi)
FORC3 mul[c] = getreal(type);
if (tag == 2140 + wbi && wbi >= 0)
FORC3 {
for (num=i=0; i < 4; i++)
num += getreal(type) * pow (wbtemp/100.0, i);
cam_mul[c] = 2048 / (num * mul[c]);
}
if (tag == 2317) linear_table (len);
if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
#line 6533 "dcraw/dcraw.c"
int CLASS parse_tiff_ifd (int base)
{
unsigned entries, tag, type, len, plen=16, save;
int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
int blrr=1, blrc=1, dblack[] = { 0,0,0,0 };
char software[64], *cbuf, *cp;
uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256];
double cc[4][4], cm[4][3], cam_xyz[4][3], num;
double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 };
unsigned sony_curve[] = { 0,0,0,0,0,4095 };
unsigned *buf, sony_offset=0, sony_length=0, sony_key=0;
struct jhead jh;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *sfp;
#endif
if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
return 1;
ifd = tiff_nifds++;
for (j=0; j < 4; j++)
for (i=0; i < 4; i++)
cc[j][i] = i == j;
entries = get2();
if (entries > 512) return 1;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 5: width = get2(); break;
case 6: height = get2(); break;
case 7: width += get2(); break;
case 9: if ((i = get2())) filters = i; break;
case 17: case 18:
if (type == 3 && len == 1)
cam_mul[(tag-17)*2] = get2() / 256.0;
break;
case 23:
if (type == 3) iso_speed = get2();
break;
case 36: case 37: case 38:
cam_mul[tag-0x24] = get2();
break;
case 39:
if (len < 50 || cam_mul[0]) break;
fseek (ifp, 12, SEEK_CUR);
FORC3 cam_mul[c] = get2();
break;
case 46:
if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break;
thumb_offset = ftell(ifp) - 2;
thumb_length = len;
break;
case 61440: /* Fuji HS10 table */
parse_tiff_ifd (base);
break;
case 2: case 256: case 61441: /* ImageWidth */
tiff_ifd[ifd].t_width = getint(type);
break;
case 3: case 257: case 61442: /* ImageHeight */
tiff_ifd[ifd].t_height = getint(type);
break;
case 258: /* BitsPerSample */
case 61443:
tiff_ifd[ifd].samples = len & 7;
tiff_ifd[ifd].bps = getint(type);
break;
case 61446:
raw_height = 0;
if (tiff_ifd[ifd].bps > 12) break;
load_raw = &CLASS packed_load_raw;
load_flags = get4() ? 24:80;
break;
case 259: /* Compression */
tiff_ifd[ifd].comp = getint(type);
break;
case 262: /* PhotometricInterpretation */
tiff_ifd[ifd].phint = get2();
break;
case 270: /* ImageDescription */
fread (desc, 512, 1, ifp);
break;
case 271: /* Make */
fgets (make, 64, ifp);
break;
case 272: /* Model */
fgets (model, 64, ifp);
break;
case 280: /* Panasonic RW2 offset */
if (type != 4) break;
load_raw = &CLASS panasonic_load_raw;
load_flags = 0x2008;
case 273: /* StripOffset */
case 513: /* JpegIFOffset */
case 61447:
tiff_ifd[ifd].offset = get4()+base;
if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) {
fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
tiff_ifd[ifd].comp = 6;
tiff_ifd[ifd].t_width = jh.wide;
tiff_ifd[ifd].t_height = jh.high;
tiff_ifd[ifd].bps = jh.bits;
tiff_ifd[ifd].samples = jh.clrs;
if (!(jh.sraw || (jh.clrs & 1)))
tiff_ifd[ifd].t_width *= jh.clrs;
i = order;
parse_tiff (tiff_ifd[ifd].offset + 12);
order = i;
}
}
break;
case 274: /* Orientation */
tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0';
break;
case 277: /* SamplesPerPixel */
tiff_ifd[ifd].samples = getint(type) & 7;
break;
case 279: /* StripByteCounts */
case 514:
case 61448:
tiff_ifd[ifd].bytes = get4();
break;
case 61454:
FORC3 cam_mul[(4-c) % 3] = getint(type);
break;
case 305: case 11: /* Software */
fgets (software, 64, ifp);
if (!strncmp(software,"Adobe",5) ||
!strncmp(software,"dcraw",5) ||
!strncmp(software,"UFRaw",5) ||
!strncmp(software,"Bibble",6) ||
!strncmp(software,"Nikon Scan",10) ||
!strcmp (software,"Digital Photo Professional"))
is_raw = 0;
break;
case 306: /* DateTime */
get_timestamp(0);
break;
case 315: /* Artist */
fread (artist, 64, 1, ifp);
break;
case 322: /* TileWidth */
tiff_ifd[ifd].t_tile_width = getint(type);
break;
case 323: /* TileLength */
tiff_ifd[ifd].t_tile_length = getint(type);
break;
case 324: /* TileOffsets */
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
if (len == 4) {
load_raw = &CLASS sinar_4shot_load_raw;
is_raw = 5;
}
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 325: /* TileByteCount */
tiff_ifd[ifd].tile_maxbytes = 0;
for(int jj=0;jj<len;jj++)
{
int s = get4();
if(s > tiff_ifd[ifd].tile_maxbytes) tiff_ifd[ifd].tile_maxbytes=s;
}
break;
#endif
case 330: /* SubIFDs */
if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) {
load_raw = &CLASS sony_arw_load_raw;
data_offset = get4()+base;
ifd++; break;
}
if(len > 1000) len=1000; /* 1000 SubIFDs is enough */
while (len--) {
i = ftell(ifp);
fseek (ifp, get4()+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
fseek (ifp, i+4, SEEK_SET);
}
break;
case 400:
strcpy (make, "Sarnoff");
maximum = 0xfff;
break;
case 28688:
FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff;
for (i=0; i < 5; i++)
for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++)
curve[j] = curve[j-1] + (1 << i);
break;
case 29184: sony_offset = get4(); break;
case 29185: sony_length = get4(); break;
case 29217: sony_key = get4(); break;
case 29264:
parse_minolta (ftell(ifp));
raw_width = 0;
break;
case 29443:
FORC4 cam_mul[c ^ (c < 2)] = get2();
break;
case 29459:
FORC4 cam_mul[c] = get2();
i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
SWAP (cam_mul[i],cam_mul[i+1])
break;
case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = ((short) get2()) / 1024.0;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr, _(" Sony matrix:\n%f %f %f\n%f %f %f\n%f %f %f\n"), cmatrix[0][0], cmatrix[0][1], cmatrix[0][2], cmatrix[1][0], cmatrix[1][1], cmatrix[1][2], cmatrix[2][0], cmatrix[2][1], cmatrix[2][2]);
#endif
break;
case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, needs to be divided by 4
FORC4 cblack[c ^ c >> 1] = get2()/4;
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black = i;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]);
#endif
break;
case 33405: /* Model2 */
fgets (model2, 64, ifp);
break;
case 33422: /* CFAPattern */
case 64777: /* Kodak P-series */
if ((plen=len) > 16) plen = 16;
fread (cfa_pat, 1, plen, ifp);
for (colors=cfa=i=0; i < plen && colors < 4; i++) {
colors += !(cfa & (1 << cfa_pat[i]));
cfa |= 1 << cfa_pat[i];
}
if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */
if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */
goto guess_cfa_pc;
case 33424:
case 65024:
fseek (ifp, get4()+base, SEEK_SET);
parse_kodak_ifd (base);
break;
case 33434: /* ExposureTime */
shutter = getreal(type);
break;
case 33437: /* FNumber */
aperture = getreal(type);
break;
case 34306: /* Leaf white balance */
FORC4 cam_mul[c ^ 1] = 4096.0 / get2();
break;
case 34307: /* Leaf CatchLight color matrix */
fread (software, 1, 7, ifp);
if (strncmp(software,"MATRIX",6)) break;
colors = 4;
for (raw_color = i=0; i < 3; i++) {
FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]);
if (!use_camera_wb) continue;
num = 0;
FORC4 num += rgb_cam[i][c];
FORC4 rgb_cam[i][c] /= num;
}
break;
case 34310: /* Leaf metadata */
parse_mos (ftell(ifp));
case 34303:
strcpy (make, "Leaf");
break;
case 34665: /* EXIF tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_exif (base);
break;
case 34853: /* GPSInfo tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_gps (base);
break;
case 34675: /* InterColorProfile */
case 50831: /* AsShotICCProfile */
profile_offset = ftell(ifp);
profile_length = len;
break;
case 37122: /* CompressedBitsPerPixel */
kodak_cbpp = get4();
break;
case 37386: /* FocalLength */
focal_len = getreal(type);
break;
case 37393: /* ImageNumber */
shot_order = getint(type);
break;
case 37400: /* old Kodak KDC tag */
for (raw_color = i=0; i < 3; i++) {
getreal(type);
FORC3 rgb_cam[i][c] = getreal(type);
}
break;
case 40976:
strip_offset = get4();
load_raw = &CLASS samsung_load_raw;
break;
case 46275: /* Imacon tags */
strcpy (make, "Imacon");
data_offset = ftell(ifp);
ima_len = len;
break;
case 46279:
if (!ima_len) break;
fseek (ifp, 38, SEEK_CUR);
case 46274:
fseek (ifp, 40, SEEK_CUR);
raw_width = get4();
raw_height = get4();
left_margin = get4() & 7;
width = raw_width - left_margin - (get4() & 7);
top_margin = get4() & 7;
height = raw_height - top_margin - (get4() & 7);
if (raw_width == 7262 && ima_len == 234317952 ) {
height = 5412;
width = 7216;
left_margin = 7;
filters=0;
} else if (raw_width == 7262) {
height = 5444;
width = 7244;
left_margin = 7;
}
fseek (ifp, 52, SEEK_CUR);
FORC3 cam_mul[c] = getreal(11);
fseek (ifp, 114, SEEK_CUR);
flip = (get2() >> 7) * 90;
if (width * height * 6 == ima_len) {
if (flip % 180 == 90) SWAP(width,height);
raw_width = width;
raw_height = height;
left_margin = top_margin = filters = flip = 0;
}
sprintf (model, "Ixpress %d-Mp", height*width/1000000);
load_raw = &CLASS imacon_full_load_raw;
if (filters) {
if (left_margin & 1) filters = 0x61616161;
load_raw = &CLASS unpacked_load_raw;
}
maximum = 0xffff;
break;
case 50454: /* Sinar tag */
case 50455:
if (!(cbuf = (char *) malloc(len))) break;
fread (cbuf, 1, len, ifp);
for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n'))
if (!strncmp (++cp,"Neutral ",8))
sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2);
free (cbuf);
break;
case 50458:
if (!make[0]) strcpy (make, "Hasselblad");
break;
case 50459: /* Hasselblad tag */
i = order;
j = ftell(ifp);
c = tiff_nifds;
order = get2();
fseek (ifp, j+(get2(),get4()), SEEK_SET);
parse_tiff_ifd (j);
maximum = 0xffff;
tiff_nifds = c;
order = i;
break;
case 50706: /* DNGVersion */
FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
if (!make[0]) strcpy (make, "DNG");
is_raw = 1;
break;
case 50710: /* CFAPlaneColor */
if (len > 4) len = 4;
colors = len;
fread (cfa_pc, 1, colors, ifp);
guess_cfa_pc:
FORCC tab[cfa_pc[c]] = c;
cdesc[c] = 0;
for (i=16; i--; )
filters = filters << 2 | tab[cfa_pat[i % plen]];
filters -= !filters;
break;
case 50711: /* CFALayout */
if (get2() == 2) {
fuji_width = 1;
filters = 0x49494949;
}
break;
case 291:
case 50712: /* LinearizationTable */
linear_table (len);
break;
case 50713: /* BlackLevelRepeatDim */
blrr = get2();
blrc = get2();
break;
case 61450:
blrr = blrc = 2;
case 50714: /* BlackLevel */
black = getreal(type);
if ((unsigned)(filters+1) < 1000) break;
dblack[0] = black;
dblack[1] = (blrc == 2) ? getreal(type):dblack[0];
dblack[2] = (blrr == 2) ? getreal(type):dblack[0];
dblack[3] = (blrc == 2 && blrr == 2) ? getreal(type):dblack[1];
if (colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
FORC4 cblack[filters >> (c << 1) & 3] = dblack[c];
black = 0;
break;
case 50715: /* BlackLevelDeltaH */
case 50716: /* BlackLevelDeltaV */
for (num=i=0; i < len && i < 65536; i++)
num += getreal(type);
black += num/len + 0.5;
break;
case 50717: /* WhiteLevel */
maximum = getint(type);
break;
case 50718: /* DefaultScale */
pixel_aspect = getreal(type);
pixel_aspect /= getreal(type);
break;
case 50721: /* ColorMatrix1 */
case 50722: /* ColorMatrix2 */
FORCC for (j=0; j < 3; j++)
cm[c][j] = getreal(type);
use_cm = 1;
break;
case 50723: /* CameraCalibration1 */
case 50724: /* CameraCalibration2 */
for (i=0; i < colors; i++)
FORCC cc[i][c] = getreal(type);
break;
case 50727: /* AnalogBalance */
FORCC ab[c] = getreal(type);
break;
case 50728: /* AsShotNeutral */
FORCC asn[c] = getreal(type);
break;
case 50729: /* AsShotWhiteXY */
xyz[0] = getreal(type);
xyz[1] = getreal(type);
xyz[2] = 1 - xyz[0] - xyz[1];
FORC3 xyz[c] /= d65_white[c];
break;
case 50740: /* DNGPrivateData */
if (dng_version) break;
parse_minolta (j = get4()+base);
fseek (ifp, j, SEEK_SET);
parse_tiff_ifd (base);
break;
case 50752:
read_shorts (cr2_slice, 3);
break;
case 50829: /* ActiveArea */
top_margin = getint(type);
left_margin = getint(type);
height = getint(type) - top_margin;
width = getint(type) - left_margin;
break;
case 50830: /* MaskedAreas */
for (i=0; i < len && i < 32; i++)
mask[0][i] = getint(type);
black = 0;
break;
case 51009: /* OpcodeList2 */
meta_offset = ftell(ifp);
break;
case 64772: /* Kodak P-series */
if (len < 13) break;
fseek (ifp, 16, SEEK_CUR);
data_offset = get4();
fseek (ifp, 28, SEEK_CUR);
data_offset += get4();
load_raw = &CLASS packed_load_raw;
break;
case 65026:
if (type == 2) fgets (model2, 64, ifp);
}
fseek (ifp, save, SEEK_SET);
}
if (sony_length && (buf = (unsigned *) malloc(sony_length))) {
fseek (ifp, sony_offset, SEEK_SET);
fread (buf, sony_length, 1, ifp);
sony_decrypt (buf, sony_length/4, 1, sony_key);
#ifndef LIBRAW_LIBRARY_BUILD
sfp = ifp;
if ((ifp = tmpfile())) {
fwrite (buf, sony_length, 1, ifp);
fseek (ifp, 0, SEEK_SET);
parse_tiff_ifd (-sony_offset);
fclose (ifp);
}
ifp = sfp;
#else
if( !ifp->tempbuffer_open(buf,sony_length))
{
parse_tiff_ifd(-sony_offset);
ifp->tempbuffer_close();
}
#endif
free (buf);
}
for (i=0; i < colors; i++)
FORCC cc[i][c] *= ab[i];
if (use_cm) {
FORCC for (i=0; i < 3; i++)
for (cam_xyz[c][i]=j=0; j < colors; j++)
cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i];
cam_xyz_coeff (cam_xyz);
}
if (asn[0]) {
cam_mul[3] = 0;
FORCC cam_mul[c] = 1 / asn[c];
}
if (!use_cm)
FORCC pre_mul[c] /= cc[c][c];
return 0;
}
int CLASS parse_tiff (int base)
{
int doff;
fseek (ifp, base, SEEK_SET);
order = get2();
if (order != 0x4949 && order != 0x4d4d) return 0;
get2();
while ((doff = get4())) {
fseek (ifp, doff+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
}
return 1;
}
void CLASS apply_tiff()
{
int max_samp=0, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000)
{
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
}
for (i=0; i < tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
(unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 &&
tiff_ifd[i].t_width*tiff_ifd[i].t_height > raw_width*raw_height) {
raw_width = tiff_ifd[i].t_width;
raw_height = tiff_ifd[i].t_height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
tiff_flip = tiff_ifd[i].t_flip;
tiff_samples = tiff_ifd[i].samples;
tile_width = tiff_ifd[i].t_tile_width;
tile_length = tiff_ifd[i].t_tile_length;
#ifdef LIBRAW_LIBRARY_BUILD
data_size = tile_length < INT_MAX && tile_length>0 ? tiff_ifd[i].tile_maxbytes: tiff_ifd[i].bytes;
#endif
raw = i;
}
}
if (!tile_width ) tile_width = INT_MAX;
if (!tile_length) tile_length = INT_MAX;
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773: goto slr;
case 0: case 1:
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
load_flags = 24;
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
load_flags = 81;
tiff_bps = 12;
} slr:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: if (tiff_ifd[raw].phint == 2)
load_flags = 6;
load_raw = &CLASS packed_load_raw; break;
case 14: load_flags = 0;
case 16: load_raw = &CLASS unpacked_load_raw;
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*7 > raw_width*raw_height)
load_raw = &CLASS olympus_load_raw;
}
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 34713:
if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
load_raw = &CLASS packed_load_raw;
load_flags = 1;
} else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
order = 0x4d4d;
} else
load_raw = &CLASS nikon_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: case 34892: break;
default: is_raw = 0;
}
if (!dng_version)
if ( (tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
tiff_compress != 32769 && tiff_compress != 32770)
|| (tiff_bps == 8 && !strcasestr(make,"Kodak") &&
!strstr(model2,"DEBUG RAW")))
is_raw = 0;
for (i=0; i < tiff_nifds; i++)
if (i != raw && tiff_ifd[i].samples == max_samp &&
tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33 &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) >
thumb_width * thumb_height / (SQR(thumb_misc)+1)
&& tiff_ifd[i].comp != 34892) {
thumb_width = tiff_ifd[i].t_width;
thumb_height = tiff_ifd[i].t_height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps <= 8)
write_thumb = &CLASS ppm_thumb;
else if (!strcmp(make,"Imacon"))
write_thumb = &CLASS ppm16_thumb;
else
thumb_load_raw = &CLASS kodak_thumb_load_raw;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
void CLASS parse_minolta (int base)
{
int save, tag, len, offset, high=0, wide=0, i, c;
short sorder=order;
fseek (ifp, base, SEEK_SET);
if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
order = fgetc(ifp) * 0x101;
offset = base + get4() + 8;
while ((save=ftell(ifp)) < offset) {
for (tag=i=0; i < 4; i++)
tag = tag << 8 | fgetc(ifp);
len = get4();
switch (tag) {
case 0x505244: /* PRD */
fseek (ifp, 8, SEEK_CUR);
high = get2();
wide = get2();
break;
case 0x574247: /* WBG */
get4();
i = strcmp(model,"DiMAGE A200") ? 0:3;
FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
break;
case 0x545457: /* TTW */
parse_tiff (ftell(ifp));
data_offset = offset;
}
fseek (ifp, save+len+8, SEEK_SET);
}
raw_height = high;
raw_width = wide;
order = sorder;
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
at the same time. The raw file has no header, so try to
to open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
const char *file, *ext;
char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
if(ifp->wfname())
{
std::wstring rawfile(ifp->wfname());
rawfile.replace(rawfile.length()-3,3,L"JPG");
if(!ifp->subfile_open(rawfile.c_str()))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
if(!ifp->fname())
{
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
ext = strrchr (ifname, '.');
file = strrchr (ifname, '/');
if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
if (!file) file = ifname-1;
#else
if (!file) file = (char*)ifname-1;
#endif
file++;
if (!ext || strlen(ext) != 4 || ext-file != 8) return;
jname = (char *) malloc (strlen(ifname) + 1);
merror (jname, "parse_external_jpeg()");
strcpy (jname, ifname);
jfile = file - ifname + jname;
jext = ext - ifname + jname;
if (strcasecmp (ext, ".jpg")) {
strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
if (isdigit(*file)) {
memcpy (jfile, file+4, 4);
memcpy (jfile+4, file, 4);
}
} else
while (isdigit(*--jext)) {
if (*jext != '9') {
(*jext)++;
break;
}
*jext = '0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (strcmp (jname, ifname)) {
if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
fclose (ifp);
}
}
#else
if (strcmp (jname, ifname))
{
if(!ifp->subfile_open(jname))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
}
#endif
if (!timestamp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
}
free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
ifp = save;
#endif
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
static const ushort key[] = { 0x410, 0x45f3 };
int i, bpp, row, col, vbits=0;
unsigned long bitbuf=0;
if ((get2(),get4()) != 0x80008 || !get4()) return;
bpp = get2();
if (bpp != 10 && bpp != 12) return;
for (i=row=0; row < 8; row++)
for (col=0; col < 8; col++) {
if (vbits < bpp) {
bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
vbits += 16;
}
white[row][col] =
bitbuf << (LONG_BIT - vbits) >> (LONG_BIT - bpp);
vbits -= bpp;
}
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff (int offset, int length, int depth)
{
int tboff, nrecs, c, type, len, save, wbi=-1;
ushort key[] = { 0x410, 0x45f3 };
fseek (ifp, offset+length-4, SEEK_SET);
tboff = get4() + offset;
fseek (ifp, tboff, SEEK_SET);
nrecs = get2();
if ((nrecs | depth) > 127) return;
while (nrecs--) {
type = get2();
len = get4();
save = ftell(ifp) + 4;
fseek (ifp, offset+get4(), SEEK_SET);
if ((((type >> 8) + 8) | 8) == 0x38)
parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */
if (type == 0x0810)
fread (artist, 64, 1, ifp);
if (type == 0x080a) {
fread (make, 64, 1, ifp);
fseek (ifp, strlen(make) - 63, SEEK_CUR);
fread (model, 64, 1, ifp);
}
if (type == 0x1810) {
width = get4();
height = get4();
pixel_aspect = int_to_float(get4());
flip = get4();
}
if (type == 0x1835) /* Get the decoder table */
tiff_compress = get4();
if (type == 0x2007) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (type == 0x1818) {
shutter = pow (2.0f, -int_to_float((get4(),get4())));
aperture = pow (2.0f, int_to_float(get4())/2);
}
if (type == 0x102a) {
iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
aperture = pow (2.0, (get2(),(short)get2())/64.0);
shutter = pow (2.0,-((short)get2())/32.0);
wbi = (get2(),get2());
if (wbi > 17) wbi = 0;
fseek (ifp, 32, SEEK_CUR);
if (shutter > 1e6) shutter = get2()/10.0;
}
if (type == 0x102c) {
if (get2() > 512) { /* Pro90, G1 */
fseek (ifp, 118, SEEK_CUR);
FORC4 cam_mul[c ^ 2] = get2();
} else { /* G2, S30, S40 */
fseek (ifp, 98, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
}
}
if (type == 0x0032) {
if (len == 768) { /* EOS D30 */
fseek (ifp, 72, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = 1024.0 / get2();
if (!wbi) cam_mul[0] = -1; /* use my auto white balance */
} else if (!cam_mul[0]) {
if (get2() == key[0]) /* Pro1, G6, S60, S70 */
c = (strstr(model,"Pro1") ?
"012346000000000000":"01345:000000006008")[wbi]-'0'+ 2;
else { /* G3, G5, S45, S50 */
c = "023457000000006000"[wbi]-'0';
key[0] = key[1] = 0;
}
fseek (ifp, 78 + c*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
if (!wbi) cam_mul[0] = -1;
}
}
if (type == 0x10a9) { /* D60, 10D, 300D, and clones */
if (len > 66) wbi = "0134567028"[wbi]-'0';
fseek (ifp, 2 + wbi*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
}
if (type == 0x1030 && (0x18040 >> wbi & 1))
ciff_block_1030(); /* all that don't have 0x10a9 */
if (type == 0x1031) {
raw_width = (get2(),get2());
raw_height = get2();
}
if (type == 0x5029) {
focal_len = len >> 16;
if ((len & 0xffff) == 2) focal_len /= 32;
}
if (type == 0x5813) flash_used = int_to_float(len);
if (type == 0x5814) canon_ev = int_to_float(len);
if (type == 0x5817) shot_order = len;
if (type == 0x5834) unique_id = len;
if (type == 0x580e) timestamp = len;
if (type == 0x180e) timestamp = get4();
#ifdef LOCALTIME
if ((type | 0x4000) == 0x580e)
timestamp = mktime (gmtime (×tamp));
#endif
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_rollei()
{
char line[128], *val;
struct tm t;
fseek (ifp, 0, SEEK_SET);
memset (&t, 0, sizeof t);
do {
fgets (line, 128, ifp);
if ((val = strchr(line,'=')))
*val++ = 0;
else
val = line + strlen(line);
if (!strcmp(line,"DAT"))
sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
if (!strcmp(line,"TIM"))
sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
if (!strcmp(line,"HDR"))
thumb_offset = atoi(val);
if (!strcmp(line,"X "))
raw_width = atoi(val);
if (!strcmp(line,"Y "))
raw_height = atoi(val);
if (!strcmp(line,"TX "))
thumb_width = atoi(val);
if (!strcmp(line,"TY "))
thumb_height = atoi(val);
} while (strncmp(line,"EOHD",4));
data_offset = thumb_offset + thumb_width * thumb_height * 2;
t.tm_year -= 1900;
t.tm_mon -= 1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
strcpy (make, "Rollei");
strcpy (model,"d530flex");
write_thumb = &CLASS rollei_thumb;
}
void CLASS parse_sinar_ia()
{
int entries, off;
char str[8], *cp;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
entries = get4();
fseek (ifp, get4(), SEEK_SET);
while (entries--) {
off = get4(); get4();
fread (str, 8, 1, ifp);
if (!strcmp(str,"META")) meta_offset = off;
if (!strcmp(str,"THUMB")) thumb_offset = off;
if (!strcmp(str,"RAW0")) data_offset = off;
}
fseek (ifp, meta_offset+20, SEEK_SET);
fread (make, 64, 1, ifp);
make[63] = 0;
if ((cp = strchr(make,' '))) {
strcpy (model, cp+1);
*cp = 0;
}
raw_width = get2();
raw_height = get2();
load_raw = &CLASS unpacked_load_raw;
thumb_width = (get4(),get2());
thumb_height = get2();
write_thumb = &CLASS ppm_thumb;
maximum = 0x3fff;
}
void CLASS parse_phase_one (int base)
{
unsigned entries, tag, type, len, data, save, i, c;
float romm_cam[3][3];
char *cp;
memset (&ph1, 0, sizeof ph1);
fseek (ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177) return; /* "Raw" */
fseek (ifp, get4()+base, SEEK_SET);
entries = get4();
get4();
while (entries--) {
tag = get4();
type = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, base+data, SEEK_SET);
switch (tag) {
case 0x100: flip = "0653"[data & 3]-'0'; break;
case 0x106:
for (i=0; i < 9; i++)
romm_cam[0][i] = getreal(11);
romm_coeff (romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108: raw_width = data; break;
case 0x109: raw_height = data; break;
case 0x10a: left_margin = data; break;
case 0x10b: top_margin = data; break;
case 0x10c: width = data; break;
case 0x10d: height = data; break;
case 0x10e: ph1.format = data; break;
case 0x10f: data_offset = data+base; break;
case 0x110: meta_offset = data+base;
meta_length = len; break;
case 0x112: ph1.key_off = save - 4; break;
case 0x210: ph1.tag_210 = int_to_float(data); break;
case 0x21a: ph1.tag_21a = data; break;
case 0x21c: strip_offset = data+base; break;
case 0x21d: ph1.t_black = data; break;
case 0x222: ph1.split_col = data; break;
case 0x223: ph1.black_off = data+base; break;
case 0x301:
model[63] = 0;
fread (model, 1, 63, ifp);
if ((cp = strstr(model," camera"))) *cp = 0;
}
fseek (ifp, save, SEEK_SET);
}
load_raw = ph1.format < 3 ?
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy (make, "Phase One");
if (model[0]) return;
switch (raw_height) {
case 2060: strcpy (model,"LightPhase"); break;
case 2682: strcpy (model,"H 10"); break;
case 4128: strcpy (model,"H 20"); break;
case 5488: strcpy (model,"H 25"); break;
}
}
void CLASS parse_fuji (int offset)
{
unsigned entries, tag, len, save, c;
fseek (ifp, offset, SEEK_SET);
entries = get4();
if (entries > 255) return;
while (entries--) {
tag = get2();
len = get2();
save = ftell(ifp);
if (tag == 0x100) {
raw_height = get2();
raw_width = get2();
} else if (tag == 0x121) {
height = get2();
if ((width = get2()) == 4284) width += 3;
} else if (tag == 0x130) {
fuji_layout = fgetc(ifp) >> 7;
fuji_width = !(fgetc(ifp) & 8);
} else if (tag == 0x131) {
filters = 9;
FORC(36) xtrans[0][35-c] = fgetc(ifp) & 3;
} else if (tag == 0x2ff0) {
FORC4 cam_mul[c ^ 1] = get2();
} else if (tag == 0xc000) {
c = order;
order = 0x4949;
if ((tag = get4()) > 10000) tag = get4();
width = tag;
height = get4();
order = c;
}
fseek (ifp, save+len, SEEK_SET);
}
height <<= fuji_layout;
width >>= fuji_layout;
}
int CLASS parse_jpeg (int offset)
{
int len, save, hlen, mark;
fseek (ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3) {
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150) /* "HEAP" */
parse_ciff (save+hlen, len-hlen, 0);
if (parse_tiff (save+6)) apply_tiff();
fseek (ifp, save+len, SEEK_SET);
}
return 1;
}
void CLASS parse_riff()
{
unsigned i, size, end;
char tag[4], date[64], month[64];
static const char mon[12][4] =
{ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
struct tm t;
order = 0x4949;
fread (tag, 4, 1, ifp);
size = get4();
#ifdef LIBRAW_LIBRARY_BUILD
if((int)size<0)
throw LIBRAW_EXCEPTION_IO_EOF;
#endif
end = ftell(ifp) + size;
if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
get4();
while (ftell(ifp)+7 < end)
parse_riff();
} else if (!memcmp(tag,"nctg",4)) {
while (ftell(ifp)+7 < end) {
i = get2();
size = get2();
if ((i+1) >> 1 == 10 && size == 20)
get_timestamp(0);
else fseek (ifp, size, SEEK_CUR);
}
} else if (!memcmp(tag,"IDIT",4) && size < 64) {
fread (date, 64, 1, ifp);
date[size] = 0;
memset (&t, 0, sizeof t);
if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
t.tm_mon = i;
t.tm_year -= 1900;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
} else
fseek (ifp, size, SEEK_CUR);
}
void CLASS parse_smal (int offset, int fsize)
{
int ver;
fseek (ifp, offset+2, SEEK_SET);
order = 0x4949;
ver = fgetc(ifp);
if (ver == 6)
fseek (ifp, 5, SEEK_CUR);
if (get4() != fsize) return;
if (ver > 6) data_offset = get4();
raw_height = height = get2();
raw_width = width = get2();
strcpy (make, "SMaL");
sprintf (model, "v%d %dx%d", ver, width, height);
if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}
void CLASS parse_cine()
{
unsigned off_head, off_setup, off_image, i;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
is_raw = get2() == 2;
fseek (ifp, 14, SEEK_CUR);
is_raw *= get4();
off_head = get4();
off_setup = get4();
off_image = get4();
timestamp = get4();
if ((i = get4())) timestamp = i;
fseek (ifp, off_head+4, SEEK_SET);
raw_width = get4();
raw_height = get4();
switch (get2(),get2()) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 16: load_raw = &CLASS unpacked_load_raw;
}
fseek (ifp, off_setup+792, SEEK_SET);
strcpy (make, "CINE");
sprintf (model, "%d", get4());
fseek (ifp, 12, SEEK_CUR);
switch ((i=get4()) & 0xffffff) {
case 3: filters = 0x94949494; break;
case 4: filters = 0x49494949; break;
default: is_raw = 0;
}
fseek (ifp, 72, SEEK_CUR);
switch ((get4()+3600) % 360) {
case 270: flip = 4; break;
case 180: flip = 1; break;
case 90: flip = 7; break;
case 0: flip = 2;
}
cam_mul[0] = getreal(11);
cam_mul[2] = getreal(11);
maximum = ~((~0u) << get4());
fseek (ifp, 668, SEEK_CUR);
shutter = get4()/1000000000.0;
fseek (ifp, off_image, SEEK_SET);
if (shot_select < is_raw)
fseek (ifp, shot_select*8, SEEK_CUR);
data_offset = (INT64) get4() + 8;
data_offset += (INT64) get4() << 32;
}
void CLASS parse_redcine()
{
unsigned i, len, rdvo;
order = 0x4d4d;
is_raw = 0;
fseek (ifp, 52, SEEK_SET);
width = get4();
height = get4();
fseek (ifp, 0, SEEK_END);
fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
if (get4() != i || get4() != 0x52454f42) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
fseek (ifp, 0, SEEK_SET);
while ((len = get4()) != EOF) {
if (get4() == 0x52454456)
if (is_raw++ == shot_select)
data_offset = ftello(ifp) - 8;
fseek (ifp, len-8, SEEK_CUR);
}
} else {
rdvo = get4();
fseek (ifp, 12, SEEK_CUR);
is_raw = get4();
fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
data_offset = get4();
}
}
#line 7936 "dcraw/dcraw.c"
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
void CLASS adobe_coeff (const char *t_make, const char *t_model)
{
static const struct {
const char *prefix;
short t_black, t_maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0,
{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
{ "Canon EOS D60", 0, 0xfa0,
{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D", 0, 0x3c82,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 10D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3c80,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 300D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20,
{ 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0,
{ 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0,
{ -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } },
{ "Canon PowerShot G2", 0, 0,
{ 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } },
{ "Canon PowerShot G3", 0, 0,
{ 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } },
{ "Canon PowerShot G5", 0, 0,
{ 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0,
{ -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } },
{ "Canon PowerShot Pro90", 0, 0,
{ -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } },
{ "Canon PowerShot S30", 0, 0,
{ 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } },
{ "Canon PowerShot S40", 0, 0,
{ 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } },
{ "Canon PowerShot S45", 0, 0,
{ 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } },
{ "Canon PowerShot S50", 0, 0,
{ 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S120", 0, 0, /* LibRaw */
{ 10800,-4782,-628,-2057,10783,1176,-802,2091,4739 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0,
{ 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S200EXR", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S2Pro", 128, 0,
{ 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } },
{ "Fujifilm S3Pro", 0, 0,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS20EXR", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-A1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2", 0, 0,
{ 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-M1", 0, 0,
{ 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm XQ1", 0, 0,
{ 14305,-7365,-687,-3117,12383,432,-287,1660,4361 } },
{ "Hasselblad Lunar", 128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Hasselblad Stellar", 200, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", 8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", 8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", 178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", 177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", 177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", 176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", 173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf CMost", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d,
{ 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d,
{ 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } },
{ "Minolta DiMAGE 7", 0, 0xf7d,
{ 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } },
{ "Minolta DiMAGE A1", 0, 0xf8b,
{ 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0,
{ 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } },
{ "Nikon D2X", 0, 0,
{ 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4", 0, 0,
{ 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{"Nikon D5300",0, 0,
{ 10645,-5086,-698,-4938,13608,761,-1107,1874,5312 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{"Nikon D610",0, 0,
{ 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{"Nikon Df",0, 0,
{ 10076,-4135,-659,-4586,13006,746,-1189,2107,6185 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5400", 0, 0,
{ 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } },
{ "Nikon E5700", 0, 0,
{ -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX P330", 0, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", 200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", 200, 0,
{ 13443,-6418,-673,-1309,10025,1131,-462,1827,4782 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 AW1", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 ", 0, 0,
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus C5050", 0, 0,
{ 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc,
{ 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc,
{ 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "OLYMPUS E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{"Olympus E-M1", 0, 0,
{ 11663,-5527,-419,-1683,9915,1389,-582,1933,5016 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{"Olympus STYLUS1",0, 0,
{ 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision ov5647", 0, 0, /* DJC */
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } },
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax K10D", 0, 0,
{ 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-m", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax MX-1", 0, 0,
{ 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } },
{ "Pentax Q10", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", 15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", 143, 0,
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", 143, 0,
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", 15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LF1", 143, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C", 143, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", 15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", 15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", 143, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", 143, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", 143, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", 143, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-FZ100", 143, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", 143, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", 143, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", 143, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ200", 143, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", 143, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", 15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", 15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", 15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", 143, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", 143, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", 143, 0xfff, /* DJC */
{ 6395,-2583,-40,-3677,9109,4569,-1502,2806,6431 } },
{ "Panasonic DMC-GF1", 15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", 143, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", 143, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", 143, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", 143, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GH1", 15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", 15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", 144, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GM1", 143, 0,
{ 8977,-3976,-425,-3050,11095,1117,-1217,2563,4750 } },
{ "Panasonic DMC-GX1", 143, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{"Panasonic DMC-GX7",143,0,
{7541,-2355,-591,-3163,10598,1894,-933,2109,5006}},
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 30", 0, 0,
{ 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } },
{ "Phase One P 45", 0, 0,
{ 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung NX300", 0, 0,
{ 8873,-3984,-372,-3759,12305,1013,-994,1981,4788 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX", 0, 0, /* NX5, NX10, NX11, NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX-1", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
// Foveon: LibRaw color data
{ "Sigma SD9", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ "Sigma SD10", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ "Sigma SD14", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ "Sigma SD15", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
//{ 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
// Merills + SD1
{ "Sigma SD1", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP1 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP2 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP3 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
// Sigma DP (non-Merill Versions)
{ "Sigma DP", 0, 4095, /* LibRaw */
// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", -512, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{ "Sony DSC-RX100M2", -200, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Sony DSC-RX100", -200, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{"Sony DSC-RX10",0, 0,
{ 8562,-3595,-385,-2715,11089,1128,-1023,2081,4400 } },
{ "Sony DSC-RX1R", -128, 0,
{ 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } },
{ "Sony DSC-RX1", -128, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", -128, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", -128, 0xfeb,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A5", -128, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", -128, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", -128, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", -128, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{"Sony ILCE-3000",-128, 0,
{ 14009,-8208,729,3738,4752,2932,5743,-3800,6494 } },
{"Sony ILCE-A7R",-128, 0,
{ 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } },
{"Sony ILCE-A7",-128, 0,
{ 8592,-3219,-348,-3846,12042,1475,-1079,2166,5893 } },
{ "Sony NEX-5T", -128, 0,
{ 7623,-2693,-347,-4060,11875,1928,-1363,2329,5752 } },
{ "Sony NEX-5N", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", -128, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", -128, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", -128, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", -128, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", -128, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", -128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX", -128, 0, /* NEX-C3, NEX-F3 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", -128, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", -128, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", -128, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", -128, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", -128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", -128, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", -128, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
};
double cam_xyz[4][3];
char name[130];
int i, j;
sprintf (name, "%s %s", t_make, t_model);
for (i=0; i < sizeof table / sizeof *table; i++)
if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) {
if (table[i].t_black>0) black = (ushort) table[i].t_black;
else if(table[i].t_black <0 && black == 0 ) black = (ushort) (-table[i].t_black);
if (table[i].t_maximum) maximum = (ushort) table[i].t_maximum;
if (table[i].trans[0]) {
for (j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.cam_xyz[0][j] =
#endif
cam_xyz[0][j] = table[i].trans[j] / 10000.0;
cam_xyz_coeff (cam_xyz);
}
break;
}
}
void CLASS simple_coeff (int index)
{
static const float table[][12] = {
/* index 0 -- all Foveon cameras */
{ 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
/* index 1 -- Kodak DC20 and DC25 */
{ 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
/* index 2 -- Logitech Fotoman Pixtura */
{ 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
/* index 3 -- Nikon E880, E900, and E990 */
{ -1.936280, 1.800443, -1.448486, 2.584324,
1.405365, -0.524955, -0.289090, 0.408680,
-1.204965, 1.082304, 2.941367, -1.818705 }
};
int i, c;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[index][i*colors+c];
}
short CLASS guess_byte_order (int words)
{
uchar test[4][2];
int t=2, msb;
double diff, sum[2] = {0,0};
fread (test[0], 2, 2, ifp);
for (words-=2; words--; ) {
fread (test[t], 2, 1, ifp);
for (msb=0; msb < 2; msb++) {
diff = (test[t^2][msb] << 8 | test[t^2][!msb])
- (test[t ][msb] << 8 | test[t ][!msb]);
sum[msb] += diff*diff;
}
t = (t+1) & 3;
}
return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}
float CLASS find_green (int bps, int bite, int off0, int off1)
{
UINT64 bitbuf=0;
int vbits, col, i, c;
ushort img[2][2064];
double sum[]={0,0};
FORC(2) {
fseek (ifp, c ? off1:off0, SEEK_SET);
for (vbits=col=0; col < width; col++) {
for (vbits -= bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
}
}
FORC(width-1) {
sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
}
return 100 * log(sum[0]/sum[1]);
}
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
static const short pana[][6] = {
{ 3130, 1743, 4, 0, -6, 0 },
{ 3130, 2055, 4, 0, -6, 0 },
{ 3130, 2319, 4, 0, -6, 0 },
{ 3170, 2103, 18, 0,-42, 20 },
{ 3170, 2367, 18, 13,-42,-21 },
{ 3177, 2367, 0, 0, -1, 0 },
{ 3304, 2458, 0, 0, -1, 0 },
{ 3330, 2463, 9, 0, -5, 0 },
{ 3330, 2479, 9, 0,-17, 4 },
{ 3370, 1899, 15, 0,-44, 20 },
{ 3370, 2235, 15, 0,-44, 20 },
{ 3370, 2511, 15, 10,-44,-21 },
{ 3690, 2751, 3, 0, -8, -3 },
{ 3710, 2751, 0, 0, -3, 0 },
{ 3724, 2450, 0, 0, 0, -2 },
{ 3770, 2487, 17, 0,-44, 19 },
{ 3770, 2799, 17, 15,-44,-19 },
{ 3880, 2170, 6, 0, -6, 0 },
{ 4060, 3018, 0, 0, 0, -2 },
{ 4290, 2391, 3, 0, -8, -1 },
{ 4330, 2439, 17, 15,-44,-19 },
{ 4508, 2962, 0, 0, -3, -4 },
{ 4508, 3330, 0, 0, -3, -6 },
};
static const ushort canon[][6] = {
{ 1944, 1416, 0, 0, 48, 0 },
{ 2144, 1560, 4, 8, 52, 2 },
{ 2224, 1456, 48, 6, 0, 2 },
{ 2376, 1728, 12, 6, 52, 2 },
{ 2672, 1968, 12, 6, 44, 2 },
{ 3152, 2068, 64, 12, 0, 0 },
{ 3160, 2344, 44, 12, 4, 4 },
{ 3344, 2484, 4, 6, 52, 6 },
{ 3516, 2328, 42, 14, 0, 0 },
{ 3596, 2360, 74, 12, 0, 0 },
{ 3744, 2784, 52, 12, 8, 12 },
{ 3944, 2622, 30, 18, 6, 2 },
{ 3948, 2622, 42, 18, 0, 2 },
{ 3984, 2622, 76, 20, 0, 2 },
{ 4104, 3048, 48, 12, 24, 12 },
{ 4116, 2178, 4, 2, 0, 0 },
{ 4152, 2772, 192, 12, 0, 0 },
{ 4160, 3124, 104, 11, 8, 65 },
{ 4176, 3062, 96, 17, 8, 0 },
{ 4312, 2876, 22, 18, 0, 2 },
{ 4352, 2874, 62, 18, 0, 0 },
{ 4476, 2954, 90, 34, 0, 0 },
{ 4480, 3348, 12, 10, 36, 12 },
{ 4496, 3366, 80, 50, 12, 0 },
{ 4832, 3204, 62, 26, 0, 0 },
{ 4832, 3228, 62, 51, 0, 0 },
{ 5108, 3349, 98, 13, 0, 0 },
{ 5120, 3318, 142, 45, 62, 0 },
{ 5280, 3528, 72, 52, 0, 0 },
{ 5344, 3516, 142, 51, 0, 0 },
{ 5344, 3584, 126,100, 0, 2 },
{ 5360, 3516, 158, 51, 0, 0 },
{ 5568, 3708, 72, 38, 0, 0 },
{ 5712, 3774, 62, 20, 10, 2 },
{ 5792, 3804, 158, 51, 0, 0 },
{ 5920, 3950, 122, 80, 2, 0 },
};
static const struct {
ushort id;
char t_model[20];
} unique[] = {
{ 0x168, "EOS 10D" }, { 0x001, "EOS-1D" },
{ 0x175, "EOS 20D" }, { 0x174, "EOS-1D Mark II" },
{ 0x234, "EOS 30D" }, { 0x232, "EOS-1D Mark II N" },
{ 0x190, "EOS 40D" }, { 0x169, "EOS-1D Mark III" },
{ 0x261, "EOS 50D" }, { 0x281, "EOS-1D Mark IV" },
{ 0x287, "EOS 60D" }, { 0x167, "EOS-1DS" },
{ 0x170, "EOS 300D" }, { 0x188, "EOS-1Ds Mark II" },
{ 0x176, "EOS 450D" }, { 0x215, "EOS-1Ds Mark III" },
{ 0x189, "EOS 350D" }, { 0x324, "EOS-1D C" },
{ 0x236, "EOS 400D" }, { 0x269, "EOS-1D X" },
{ 0x252, "EOS 500D" }, { 0x213, "EOS 5D" },
{ 0x270, "EOS 550D" }, { 0x218, "EOS 5D Mark II" },
{ 0x286, "EOS 600D" }, { 0x285, "EOS 5D Mark III" },
{ 0x301, "EOS 650D" }, { 0x302, "EOS 6D" },
{ 0x325, "EOS 70D" }, { 0x326, "EOS 700D" }, { 0x250, "EOS 7D" },
{ 0x254, "EOS 1000D" },
{ 0x288, "EOS 1100D" },
{ 0x346, "EOS 100D" },
{ 0x331, "EOS M" },
};
static const struct {
ushort id;
char t_model[20];
} sony_unique[] = {
{2,"DSC-R1"},
{256,"DSLR-A100"},
{257,"DSLR-A900"},
{258,"DSLR-A700"},
{259,"DSLR-A200"},
{260,"DSLR-A350"},
{261,"DSLR-A300"},
{262,"DSLR-A900"},
{263,"DSLR-A380"},
{264,"DSLR-A330"},
{265,"DSLR-A230"},
{266,"DSLR-A290"},
{269,"DSLR-A850"},
{270,"DSLR-A850"},
{273,"DSLR-A550"},
{274,"DSLR-A500"},
{275,"DSLR-A450"},
{278,"NEX-5"},
{279,"NEX-3"},
{280,"SLT-A33"},
{281,"SLT-A55"},
{282,"DSLR-A560"},
{283,"DSLR-A580"},
{284,"NEX-C3"},
{285,"SLT-A35"},
{286,"SLT-A65"},
{287,"SLT-A77"},
{288,"NEX-5N"},
{289,"NEX-7"},
{290,"NEX-VG20E"},
{291,"SLT-A37"},
{292,"SLT-A57"},
{293,"NEX-F3"},
{294,"SLT-A99"},
{295,"NEX-6"},
{296,"NEX-5R"},
{297,"DSC-RX100"},
{298,"DSC-RX1"},
{299,"NEX-VG900"},
{300,"NEX-VG30E"},
{302,"ILCE-3000"},
{303,"SLT-A58"},
{305,"NEX-3N"},
{306,"ILCE-A7"},
{307,"NEX-5T"},
{308,"DSC-RX100M2"},
{310,"DSC-RX1R"},
{311,"ILCE-A7R"},
};
static const struct {
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char t_make[10], t_model[20];
ushort offset;
} table[] = {
{ 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" },
{ 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" },
{ 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" },
{ 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" },
{ 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 },
{ 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" },
{ 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 },
{ 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" },
{ 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" },
{ 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 },
{ 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" },
{ 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" },
{ 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" },
{ 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" },
{ 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" },
{ 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" },
{ 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" },
{ 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" },
{ 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" },
{ 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" },
{ 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" },
{ 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" },
{ 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" },
{ 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" },
{ 19131120,4168,3060,92,16, 4, 1, 8,0x94,0,2,"Canon","PowerShot SX220 HS" },
{ 21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" },
{ 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" },
{ 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" },
{ 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" },
{ 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" },
{ 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" },
{ 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" },
{ 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" },
{ 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" },
{ 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" },
{ 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" },
{ 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" },
{ 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" },
{ 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" },
{ 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" },
{ 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" },
{ 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" },
{ 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" },
{ 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" },
{ 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" },
{ 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" },
{ 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" },
{ 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" },
{ 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" },
{ 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" },
{ 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic","640x480" },
{ 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" },
{ 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" },
{ 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 },
{ 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" },
{ 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 },
{ 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" },
{ 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 },
{ 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 },
{ 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" },
{ 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" },
{ 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" },
{ 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" },
{ 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" },
{ 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" },
{ 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" },
{ 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" },
{ 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" },
{ 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" },
{ 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" },
{ 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" },
{ 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" },
{ 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" },
{ 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" },
{ 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" },
{ 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
{ 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
{ 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
{ 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","3072x2048",68 },
{ 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x4080",68 },
{ 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","4080x5440",68 },
{ 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
{ 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
};
static const char *corp[] =
{ "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm",
"Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica",
"Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh",
"Samsung", "Sigma", "Sinar", "Sony" };
char head[32], *cp;
int hlen, flen, fsize, zero_fsize=1, i, c;
struct jhead jh;
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset (tiff_ifd, 0, sizeof tiff_ifd);
memset (gpsdata, 0, sizeof gpsdata);
memset (cblack, 0, sizeof cblack);
memset (white, 0, sizeof white);
memset (mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i=0; i < 4; i++) {
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i=0; i < 0x10000; i++) curve[i] = i;
order = get2();
hlen = get4();
fseek (ifp, 0, SEEK_SET);
fread (head, 1, 32, ifp);
fseek (ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) ||
(cp = (char *) memmem (head, 32, (char*)"IIII", 4))) {
parse_phase_one (cp-head);
if (cp-head && parse_tiff(0)) apply_tiff();
} else if (order == 0x4949 || order == 0x4d4d) {
if (!memcmp (head+6,"HEAPCCDR",8)) {
data_offset = hlen;
parse_ciff (hlen, flen-hlen, 0);
load_raw = &CLASS canon_load_raw;
} else if (parse_tiff(0)) apply_tiff();
} else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
!memcmp (head+6,"Exif",4)) {
fseek (ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek (ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
} else if (!memcmp (head+25,"ARECOYK",7)) {
strcpy (make, "Contax");
strcpy (model,"N Digital");
fseek (ifp, 33, SEEK_SET);
get_timestamp(1);
fseek (ifp, 60, SEEK_SET);
FORC4 cam_mul[c ^ (c >> 1)] = get4();
} else if (!strcmp (head, "PXN")) {
strcpy (make, "Logitech");
strcpy (model,"Fotoman Pixtura");
} else if (!strcmp (head, "qktk")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
} else if (!strcmp (head, "qktn")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
} else if (!memcmp (head,"FUJIFILM",8)) {
fseek (ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek (ifp, 92, SEEK_SET);
parse_fuji (get4());
if (thumb_offset > 120) {
fseek (ifp, 120, SEEK_SET);
is_raw += (i = get4()) && 1;
if (is_raw == 2 && shot_select)
parse_fuji (i);
}
load_raw = &CLASS unpacked_load_raw;
fseek (ifp, 100+28*(shot_select > 0), SEEK_SET);
parse_tiff (data_offset = get4());
parse_tiff (thumb_offset+12);
apply_tiff();
} else if (!memcmp (head,"RIFF",4)) {
fseek (ifp, 0, SEEK_SET);
parse_riff();
} else if (!memcmp (head,"\0\001\0\001\0@",6)) {
fseek (ifp, 6, SEEK_SET);
fread (make, 1, 8, ifp);
fread (model, 1, 8, ifp);
fread (model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"NOKIARAW",8)) {
strcpy (make, "NOKIA");
strcpy (model, "X2");
order = 0x4949;
fseek (ifp, 300, SEEK_SET);
data_offset = get4();
i = get4();
width = get2();
height = get2();
data_offset += i - width * 5 / 4 * height;
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"ARRI",4)) {
order = 0x4949;
fseek (ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy (make, "ARRI");
fseek (ifp, 668, SEEK_SET);
fread (model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
} else if (!memcmp (head,"XPDS",4)) {
order = 0x4949;
fseek (ifp, 0x800, SEEK_SET);
fread (make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek (ifp, 56, SEEK_CUR);
fread (model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
} else if (!memcmp (head+4,"RED1",4)) {
strcpy (make, "Red");
strcpy (model,"One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
filters = 0x49494949;
} else if (!memcmp (head,"DSC-Image",9))
parse_rollei();
else if (!memcmp (head,"PWAD",4))
parse_sinar_ia();
else if (!memcmp (head,"\0MRM",4))
parse_minolta(0);
else if (!memcmp (head,"FOVb",4))
{
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!imgdata.params.force_foveon_x3f)
parse_foveon();
else
#endif
parse_x3f();
#else
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
parse_foveon();
#endif
#endif
}
else if (!memcmp (head,"CI",2))
parse_cine();
else
for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++)
if (fsize == table[i].fsize) {
strcpy (make, table[i].t_make );
strcpy (model, table[i].t_model);
flip = table[i].flags >> 2;
zero_is_bad = table[i].flags & 2;
if (table[i].flags & 1)
parse_external_jpeg();
data_offset = table[i].offset;
raw_width = table[i].rw;
raw_height = table[i].rh;
left_margin = table[i].lm;
top_margin = table[i].tm;
width = raw_width - left_margin - table[i].rm;
height = raw_height - top_margin - table[i].bm;
filters = 0x1010101 * table[i].cf;
colors = 4 - !((filters & filters >> 1) & 0x5555);
load_flags = table[i].lf;
switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) {
case 6:
load_raw = &CLASS minolta_rd175_load_raw; break;
case 8:
load_raw = &CLASS eight_bit_load_raw; break;
case 10: case 12:
load_flags |= 128;
load_raw = &CLASS packed_load_raw; break;
case 16:
order = 0x4949 | 0x404 * (load_flags & 1);
tiff_bps -= load_flags >> 4;
tiff_bps -= load_flags = load_flags >> 1 & 7;
load_raw = &CLASS unpacked_load_raw;
}
maximum = (1 << tiff_bps) - (1 << table[i].max);
}
if (zero_fsize) fsize = 0;
if (make[0] == 0) parse_smal (0, flen);
if (make[0] == 0) {
parse_jpeg(0);
fseek(ifp,0,SEEK_END);
int sz = ftell(ifp);
if (!strncmp(model,"ov",2) && sz>=6404096 && !fseek (ifp, -6404096, SEEK_END) &&
fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) {
strcpy (make, "OmniVision");
data_offset = ftell(ifp) + 0x8000-32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
} else is_raw = 0;
}
for (i=0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr (make, corp[i])) /* Simplify company names */
strcpy (make, corp[i]);
if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) &&
((cp = strcasestr(model," DIGITAL CAMERA")) ||
(cp = strstr(model,"FILE VERSION"))))
*cp = 0;
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ') *cp = 0;
cp = model + strlen(model);
while (*--cp == ' ') *cp = 0;
i = strlen(make); /* Remove make from model */
if (!strncasecmp (model, make, i) && model[i++] == ' ')
memmove (model, model+i, 64-i);
if (!strncmp (model,"FinePix ",8))
strcpy (model, model+8);
if (!strncmp (model,"Digital Camera ",15))
strcpy (model, model+15);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw) goto notraw;
if (!height) height = raw_height;
if (!width) width = raw_width;
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{ height = 2616; width = 3896; }
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{ height = 3124; width = 4688; filters = 0x16161616; }
if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x")))
{ width = 4309; filters = 0x16161616; }
if (width >= 4960 && !strncmp(model,"K-5",3))
{ left_margin = 10; width = 4950; filters = 0x16161616; }
if (width == 4736 && !strcmp(model,"K-7"))
{ height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; }
if (width == 7424 && !strcmp(model,"645D"))
{ height = 5502; width = 7328; filters = 0x61616161; top_margin = 29;
left_margin = 48; }
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
if (dng_version) {
if (filters == UINT_MAX) filters = 0;
if (filters) is_raw = tiff_samples;
else colors = tiff_samples;
switch (tiff_compress) {
case 0: /* Compression not set, assuming uncompressed */
case 1: load_raw = &CLASS packed_dng_load_raw; break;
case 7: load_raw = &CLASS lossless_dng_load_raw; break;
case 34892: load_raw = &CLASS lossy_dng_load_raw; break;
default: load_raw = 0;
}
goto dng_skip;
}
if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) {
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i=0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1]) {
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
}
if ((unique_id | 0x20000) == 0x2720000) {
left_margin = 8;
top_margin = 16;
}
}
if (!strcmp(make,"Canon") && unique_id)
{
for (i=0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
adobe_coeff ("Canon", unique[i].t_model);
strcpy(model,unique[i].t_model);
}
}
if (!strcasecmp(make,"Sony") && unique_id)
{
for (i=0; i < sizeof sony_unique / sizeof *sony_unique; i++)
if (unique_id == sony_unique[i].id)
{
adobe_coeff ("Sony", sony_unique[i].t_model);
strcpy(model,sony_unique[i].t_model);
}
}
if (!strcmp(make,"Nikon")) {
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model,"KAI-0340")
&& find_green (16, 16, 3840, 5120) < 25) {
height = 480;
top_margin = filters = 0;
strcpy (model,"C603");
}
if (is_foveon) {
if (height*2 < width) pixel_aspect = 0.5;
if (height > width) pixel_aspect = 2;
filters = 0;
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!imgdata.params.force_foveon_x3f)
simple_coeff(0);
#endif
} else if (!strcmp(make,"Canon") && tiff_bps == 15) {
switch (width) {
case 3344: width -= 66;
case 3872: width -= 6;
}
if (height > width) SWAP(height,width);
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
} else if (!strcmp(model,"PowerShot 600")) {
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
} else if (!strcmp(model,"PowerShot A5") ||
!strcmp(model,"PowerShot A5 Zoom")) {
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256/235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot A50")) {
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot Pro70")) {
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 40;
} else if (!strcmp(model,"PowerShot Pro90 IS") ||
!strcmp(model,"PowerShot G1")) {
colors = 4;
filters = 0xb4b4b4b4;
} else if (!strcmp(model,"PowerShot A610")) {
if (canon_s2is()) strcpy (model+10, "S2 IS");
} else if (!strcmp(model,"PowerShot SX220 HS")) {
mask[0][0] = top_margin = 16;
mask[0][2] = top_margin + height;
mask[0][3] = left_margin = 92;
} else if (!strcmp(model,"PowerShot S120")) {
raw_width = 4192;
raw_height = 3062;
width = 4022;
height = 3017;
mask[0][0] = top_margin = 30;
mask[0][2] = top_margin + height;
left_margin = 120;
mask[0][1] = 23;
mask[0][3] = 72;
} else if (!strcmp(model,"PowerShot G16")) {
mask[0][0] = 0;
mask[0][2] = 80;
mask[0][1] = 0;
mask[0][3] = 16;
top_margin = 28;
left_margin = 120;
width = raw_width-left_margin-48;
height = raw_height-top_margin-14;
} else if (!strcmp(model,"PowerShot SX50 HS")) {
mask[0][0] = top_margin = 17;
mask[0][2] = raw_height;
mask[0][3] = 80;
filters = 0x49494949;
} else if (!strcmp(model,"PowerShot G10")) {
filters = 0x49494949;
} else if (!strcmp(model,"EOS D2000C")) {
filters = 0x61616161;
black = curve[200];
} else if (!strcmp(model,"D1")) {
cam_mul[0] *= 256/527.0;
cam_mul[2] *= 256/317.0;
} else if (!strcmp(model,"D1X")) {
width -= 4;
pixel_aspect = 0.5;
} else if (!strcmp(model,"D40X") ||
!strcmp(model,"D60") ||
!strcmp(model,"D80") ||
!strcmp(model,"D3000")) {
height -= 3;
width -= 4;
} else if (!strcmp(model,"D3") ||
!strcmp(model,"D3S") ||
!strcmp(model,"D700")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"D3100")) {
width -= 28;
left_margin = 6;
} else if (!strcmp(model,"D5000") ||
!strcmp(model,"D90")) {
width -= 42;
} else if (!strcmp(model,"D5100") ||
!strcmp(model,"D7000") ||
!strcmp(model,"COOLPIX A")) {
width -= 44;
} else if (!strcmp(model,"D3200") ||
!strcmp(model,"D600") ||
!strcmp(model,"D610") ||
!strncmp(model,"D800",4)) {
width -= 46;
} else if (!strcmp(model,"D4")) {
width -= 52;
left_margin = 2;
} else if (!strncmp(model,"D40",3) ||
!strncmp(model,"D50",3) ||
!strncmp(model,"D70",3)) {
width--;
} else if (!strcmp(model,"D100")) {
if (load_flags)
raw_width = (width += 3) + 3;
} else if (!strcmp(model,"D200")) {
left_margin = 1;
width -= 4;
filters = 0x94949494;
} else if (!strncmp(model,"D2H",3)) {
left_margin = 6;
width -= 14;
} else if (!strncmp(model,"D2X",3)) {
if (width == 3264) width -= 32;
else width -= 8;
} else if (!strncmp(model,"D300",4)) {
width -= 32;
} else if (!strcmp(make,"Nikon") && !strcmp(model,"Df")) {
left_margin=4;
width-=64;
} else if (!strcmp(make,"Nikon") && raw_width == 4032) {
adobe_coeff ("Nikon","COOLPIX P7700");
} else if (!strncmp(model,"COOLPIX P",9)) {
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && iso_speed >= 400)
black = 255;
} else if (!strncmp(model,"1 ",2)) {
height -= 2;
} else if (fsize == 1581060) {
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
} else if (fsize == 3178560) {
cam_mul[0] *= 4;
cam_mul[2] *= 4;
} else if (fsize == 4771840) {
if (!timestamp && nikon_e995())
strcpy (model, "E995");
if (strcmp(model,"E995")) {
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
} else if (fsize == 2940928) {
if (!timestamp && !nikon_e2100())
strcpy (model,"E2500");
if (!strcmp(model,"E2500")) {
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
} else if (fsize == 4775936) {
if (!timestamp) nikon_3700();
if (model[0] == 'E' && atoi(model+1) < 3700)
filters = 0x49494949;
if (!strcmp(model,"Optio 33WR")) {
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O') {
i = find_green (12, 32, 1188864, 3576832);
c = find_green (12, 32, 2383920, 2387016);
if (abs(i) < abs(c)) {
SWAP(i,c);
load_flags = 24;
}
if (i < 0) filters = 0x61616161;
}
} else if (fsize == 5869568) {
if (!timestamp && minolta_z2()) {
strcpy (make, "Minolta");
strcpy (model,"DiMAGE Z2");
}
load_flags = 6 + 24*(make[0] == 'M');
} else if (fsize == 6291456) {
fseek (ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d) {
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy (make, "ISG");
model[0] = 0;
}
} else if (!strcmp(make,"Fujifilm")) {
if (!strcmp(model+7,"S2Pro")) {
strcpy (model,"S2Pro");
height = 2144;
width = 2880;
flip = 6;
} else if (load_raw != &CLASS packed_load_raw)
maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00;
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width ) >> 2 << 1;
if (width == 2848 || width == 3664) filters = 0x16161616;
if (width == 4032 || width == 4952) left_margin = 0;
if (width == 3328 && (width -= 66)) left_margin = 34;
if (width == 4936) left_margin = 4;
if (!strcmp(model,"HS50EXR")) {
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if (fuji_layout) raw_width *= is_raw;
} else if (!strcmp(model,"KD-400Z")) {
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
} else if (!strcmp(model,"KD-510Z")) {
goto konica_510z;
} else if (!strcasecmp(make,"Minolta")) {
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model,"DiMAGE A",8)) {
if (!strcmp(model,"DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"ALPHA",5) ||
!strncmp(model,"DYNAX",5) ||
!strncmp(model,"MAXXUM",6)) {
sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M'));
adobe_coeff (make, model+20);
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"DiMAGE G",8)) {
if (model[8] == '4') {
height = 1716;
width = 2304;
} else if (model[8] == '5') {
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
} else if (model[8] == '6') {
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
} else if (!strcmp(model,"*ist D")) {
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
} else if (!strcmp(model,"*ist DS")) {
height -= 2;
} else if (!strcmp(make,"Samsung") && raw_width == 4704) {
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 32;
} else if (!strcmp(make,"Samsung") && raw_height == 3714) {
height -= 18;
width = 5536;
filters = 0x49494949;
} else if (!strcmp(make,"Samsung") && raw_width == 5632) {
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12) load_flags = 80;
} else if (!strcmp(model,"EX1")) {
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682) {
height -= 10;
width -= 46;
top_margin = 8;
}
} else if (!strcmp(model,"WB2000")) {
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718) {
height -= 28;
width -= 56;
top_margin = 8;
}
} else if (strstr(model,"WB550")) {
strcpy (model, "WB550");
} else if (!strcmp(model,"EX2F")) {
height = 3045;
width = 4070;
top_margin = 3;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
} else if (!strcmp(model,"STV680 VGA")) {
black = 16;
} else if (!strcmp(model,"N95")) {
height = raw_height - (top_margin = 2);
} else if (!strcmp(model,"640x480")) {
gamma_curve (0.45, 4.5, 1, 255);
} else if (!strcmp(make,"Hasselblad")) {
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262) {
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
} else if (raw_width == 7410) {
height = 5502;
width = 7328;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
} else if (raw_width == 9044) {
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
} else if (raw_width == 4090) {
strcpy (model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
}
} else if (!strcmp(make,"Sinar")) {
if (!load_raw) load_raw = &CLASS unpacked_load_raw;
maximum = 0x3fff;
} else if (!strcmp(make,"Leaf")) {
maximum = 0x3fff;
fseek (ifp, data_offset, SEEK_SET);
if (ljpeg_start (&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1) filters = 0;
if (tiff_samples > 1 || tile_length < raw_height) {
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048) {
if (tiff_samples == 1) {
filters = 1;
strcpy (cdesc, "RBTG");
strcpy (model, "CatchLight");
top_margin = 8; left_margin = 18; height = 2032; width = 2016;
} else {
strcpy (model, "DCB2");
top_margin = 10; left_margin = 16; height = 2028; width = 2022;
}
} else if (width+height == 3144+2060) {
if (!model[0]) strcpy (model, "Cantare");
if (width > height) {
top_margin = 6; left_margin = 32; height = 2048; width = 3072;
filters = 0x61616161;
} else {
left_margin = 6; top_margin = 32; width = 2048; height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V') filters = 0;
else is_raw = tiff_samples;
} else if (width == 2116) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
} else if (width == 3171) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
} else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) {
if ((flen - data_offset) / (raw_width*8/7) == raw_height)
load_raw = &CLASS panasonic_load_raw;
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
}
zero_is_bad = 1;
if ((height += 12) > raw_height) height = raw_height;
for (i=0; i < sizeof pana / sizeof *pana; i++)
if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
left_margin = pana[i][2];
top_margin = pana[i][3];
width += pana[i][4];
height += pana[i][5];
}
filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
} else if (!strcmp(model,"C770UZ")) {
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
} else if (!strcmp(make,"Olympus")) {
height += height & 1;
if (exif_cfa) filters = exif_cfa;
if (width == 4100) width -= 4;
if (width == 4080) width -= 24;
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model,"E-300") ||
!strcmp(model,"E-500")) {
width -= 20;
if (load_raw == &CLASS unpacked_load_raw) {
maximum = 0xfc3;
memset (cblack, 0, sizeof cblack);
}
} else if (!strcmp(model,"STYLUS1")) {
width -= 14;
maximum = 0xfff;
} else if (!strcmp(model,"E-330")) {
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
} else if (!strcmp(model,"SP550UZ")) {
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
}
} else if (!strcmp(model,"N Digital")) {
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
} else if (!strcmp(model,"DSC-F828")) {
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy (cdesc, "RGBE");
} else if (!strcmp(model,"DSC-V3")) {
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
} else if (!strcmp(make,"Sony") && raw_width == 3984) {
adobe_coeff ("Sony","DSC-R1");
width = 3925;
order = 0x4d4d;
} else if (!strcmp(make,"Sony") && !strcmp(model,"ILCE-3000")) {
width -= 32;
} else if (!strcmp(make,"Sony") && raw_width == 5504) {
width -= 8;
} else if (!strcmp(make,"Sony") && raw_width == 6048) {
width -= 24;
} else if (!strcmp(make,"Sony") && raw_width == 7392) {
width -= 24; // 21 pix really
} else if (!strcmp(model,"DSLR-A100")) {
if (width == 3880) {
height--;
width = ++raw_width;
} else {
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
} else if (!strcmp(model,"DSLR-A350")) {
height -= 4;
} else if (!strcmp(model,"PIXL")) {
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve (0, 7, 1, 255);
} else if (!strcmp(model,"C603") || !strcmp(model,"C330")) {
order = 0x4949;
if (filters && data_offset) {
fseek (ifp, 168, SEEK_SET);
read_shorts (curve, 256);
} else gamma_curve (0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw
: &CLASS kodak_yrgb_load_raw;
} else if (!strncasecmp(model,"EasyShare",9)) {
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
} else if (!strcasecmp(make,"Kodak")) {
if (filters == UINT_MAX) filters = 0x61616161;
if (!strncmp(model,"NC2000",6)) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"EOSDCS3B")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"EOSDCS1")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"DCS420")) {
width -= 4;
left_margin = 2;
} else if (!strncmp(model,"DCS460 ",7)) {
model[6] = 0;
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"DCS460A")) {
width -= 4;
left_margin = 2;
colors = 1;
filters = 0;
} else if (!strcmp(model,"DCS660M")) {
black = 214;
colors = 1;
filters = 0;
} else if (!strcmp(model,"DCS760M")) {
colors = 1;
filters = 0;
}
if (!strcmp(model+4,"20X"))
strcpy (cdesc, "MYCY");
if (strstr(model,"DC25")) {
strcpy (model, "DC25");
data_offset = 15424;
}
if (!strncmp(model,"DC2",3)) {
raw_height = 2 + (height = 242);
if (flen < 100000) {
raw_width = 256; width = 249;
pixel_aspect = (4.0*height) / (3.0*width);
} else {
raw_width = 512; width = 501;
pixel_aspect = (493.0*height) / (373.0*width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
} else if (!strcmp(model,"40")) {
strcpy (model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC50")) {
strcpy (model, "DC50");
height = 512;
width = 768;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC120")) {
strcpy (model, "DC120");
height = 976;
width = 848;
pixel_aspect = height/0.75/width;
load_raw = tiff_compress == 7 ?
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
} else if (!strcmp(model,"DCS200")) {
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
write_thumb = &CLASS layer_thumb;
black = 17;
}
} else if (!strcmp(model,"Fotoman Pixtura")) {
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
} else if (!strncmp(model,"QuickTake",9)) {
if (head[5]) strcpy (model+10, "200");
fseek (ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(),get2()) == 30 ? 738:736;
if (height > width) {
SWAP(height,width);
fseek (ifp, data_offset-6, SEEK_SET);
flip = ~get2() & 3 ? 5:6;
}
filters = 0x61616161;
} else if (!strcmp(make,"Rollei") && !load_raw) {
switch (raw_width) {
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
else if (!strcmp(model,"GRAS-50S5C")) {
height = 2048;
width = 2440;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x49494949;
order = 0x4949;
maximum = 0xfffC;
} else if (!strcmp(model,"BB-500CL")) {
height = 2058;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"BB-500GE")) {
height = 2058;
width = 2456;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"SVS625CL")) {
height = 2050;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x0fff;
}
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 4 || colors > 4 || colors < 1)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
if (!model[0])
sprintf (model, "%dx%d", width, height);
if (filters == UINT_MAX) filters = 0x94949494;
if (raw_color) adobe_coeff (make, model);
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color) adobe_coeff ("Apple","Quicktake");
if (thumb_offset && !thumb_height) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
dng_skip:
if (fuji_width) {
fuji_width = width >> !fuji_layout;
if (~fuji_width & 1) filters = 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
} else {
if (raw_height < height) raw_height = height;
if (raw_width < width ) raw_width = width;
}
if (!tiff_bps) tiff_bps = 12;
if (!maximum) maximum = (1 << tiff_bps) - 1;
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 4 || colors > 4)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjasper");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
#endif
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw ||
load_raw == &CLASS lossy_dng_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjpeg");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
#endif
}
#endif
if (!cdesc[0])
strcpy (cdesc, colors == 3 ? "RGBG":"GMCY");
if (!raw_height) raw_height = height;
if (!raw_width ) raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX) flip = tiff_flip;
if (flip == UINT_MAX) flip = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
}
#line 10303 "dcraw/dcraw.c"
void CLASS convert_to_rgb()
{
#ifndef LIBRAW_LIBRARY_BUILD
int row, col, c;
#endif
int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
ushort *img;
float out[3];
#endif
float out_cam[3][4];
double num, inverse[3][3];
static const double xyzd50_srgb[3][3] =
{ { 0.436083, 0.385083, 0.143055 },
{ 0.222507, 0.716888, 0.060608 },
{ 0.013930, 0.097097, 0.714022 } };
static const double rgb_rgb[3][3] =
{ { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
static const double adobe_rgb[3][3] =
{ { 0.715146, 0.284856, 0.000000 },
{ 0.000000, 1.000000, 0.000000 },
{ 0.000000, 0.041166, 0.958839 } };
static const double wide_rgb[3][3] =
{ { 0.593087, 0.404710, 0.002206 },
{ 0.095413, 0.843149, 0.061439 },
{ 0.011621, 0.069091, 0.919288 } };
static const double prophoto_rgb[3][3] =
{ { 0.529317, 0.330092, 0.140588 },
{ 0.098368, 0.873465, 0.028169 },
{ 0.016879, 0.117663, 0.865457 } };
static const double (*out_rgb[])[3] =
{ rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb };
static const char *name[] =
{ "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" };
static const unsigned phead[] =
{ 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
unsigned pbody[] =
{ 10, 0x63707274, 0, 36, /* cprt */
0x64657363, 0, 40, /* desc */
0x77747074, 0, 20, /* wtpt */
0x626b7074, 0, 20, /* bkpt */
0x72545243, 0, 14, /* rTRC */
0x67545243, 0, 14, /* gTRC */
0x62545243, 0, 14, /* bTRC */
0x7258595a, 0, 20, /* rXYZ */
0x6758595a, 0, 20, /* gXYZ */
0x6258595a, 0, 20 }; /* bXYZ */
static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2);
#endif
gamma_curve (gamm[0], gamm[1], 0, 0);
memcpy (out_cam, rgb_cam, sizeof out_cam);
#ifndef LIBRAW_LIBRARY_BUILD
raw_color |= colors == 1 || document_mode ||
output_color < 1 || output_color > 5;
#else
raw_color |= colors == 1 ||
output_color < 1 || output_color > 5;
#endif
if (!raw_color) {
oprof = (unsigned *) calloc (phead[0], 1);
merror (oprof, "convert_to_rgb()");
memcpy (oprof, phead, sizeof phead);
if (output_color == 5) oprof[4] = oprof[5];
oprof[0] = 132 + 12*pbody[0];
for (i=0; i < pbody[0]; i++) {
oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
pbody[i*3+2] = oprof[0];
oprof[0] += (pbody[i*3+3] + 3) & -4;
}
memcpy (oprof+32, pbody, sizeof pbody);
oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1;
memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite);
pcurve[3] = (short)(256/gamm[5]+0.5) << 16;
for (i=4; i < 7; i++)
memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve);
pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++) {
for (num = k=0; k < 3; k++)
num += xyzd50_srgb[i][k] * inverse[j][k];
oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
}
for (i=0; i < phead[0]/4; i++)
oprof[i] = htonl(oprof[i]);
strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (out_cam[i][j] = k=0; k < 3; k++)
out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr, raw_color ? _("Building histograms...\n") :
_("Converting to %s colorspace...\n"), name[output_color-1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
convert_to_rgb_loop(out_cam);
#else
memset (histogram, 0, sizeof histogram);
for (img=image[0], row=0; row < height; row++)
for (col=0; col < width; col++, img+=4) {
if (!raw_color) {
out[0] = out[1] = out[2] = 0;
FORCC {
out[0] += out_cam[0][c] * img[c];
out[1] += out_cam[1][c] * img[c];
out[2] += out_cam[2][c] * img[c];
}
FORC3 img[c] = CLIP((int) out[c]);
}
else if (document_mode)
img[0] = img[fcol(row,col)];
FORCC histogram[c][img[c] >> 3]++;
}
#endif
if (colors == 4 && output_color) colors = 3;
#ifndef LIBRAW_LIBRARY_BUILD
if (document_mode && filters) colors = 1;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2);
#endif
}
void CLASS fuji_rotate()
{
int i, row, col;
double step;
float r, c, fr, fc;
unsigned ur, uc;
ushort wide, high, (*img)[4], (*pix)[4];
if (!fuji_width) return;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Rotating image 45 degrees...\n"));
#endif
fuji_width = (fuji_width - 1 + shrink) >> shrink;
step = sqrt(0.5);
wide = fuji_width / step;
high = (height - fuji_width) / step;
img = (ushort (*)[4]) calloc (high, wide*sizeof *img);
merror (img, "fuji_rotate()");
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2);
#endif
for (row=0; row < high; row++)
for (col=0; col < wide; col++) {
ur = r = fuji_width + (row-col)*step;
uc = c = (row+col)*step;
if (ur > height-2 || uc > width-2) continue;
fr = r - ur;
fc = c - uc;
pix = image + ur*width + uc;
for (i=0; i < colors; i++)
img[row*wide+col][i] =
(pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) +
(pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr;
}
free (image);
width = wide;
height = high;
image = img;
fuji_width = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2);
#endif
}
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
int CLASS flip_index (int row, int col)
{
if (flip & 4) SWAP(row,col);
if (flip & 2) row = iheight - 1 - row;
if (flip & 1) col = iwidth - 1 - col;
return row * iwidth + col;
}
#line 10559 "dcraw/dcraw.c"
void CLASS tiff_set (ushort *ntag,
ushort tag, ushort type, int count, int val)
{
struct tiff_tag *tt;
int c;
tt = (struct tiff_tag *)(ntag+1) + (*ntag)++;
tt->tag = tag;
tt->type = type;
tt->count = count;
if (type < 3 && count <= 4)
FORC(4) tt->val.c[c] = val >> (c << 3);
else if (type == 3 && count <= 2)
FORC(2) tt->val.s[c] = val >> (c << 4);
else tt->val.i = val;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
void CLASS tiff_head (struct tiff_hdr *th, int full)
{
int c, psize=0;
struct tm *t;
memset (th, 0, sizeof *th);
th->t_order = htonl(0x4d4d4949) >> 16;
th->magic = 42;
th->ifd = 10;
if (full) {
tiff_set (&th->ntag, 254, 4, 1, 0);
tiff_set (&th->ntag, 256, 4, 1, width);
tiff_set (&th->ntag, 257, 4, 1, height);
tiff_set (&th->ntag, 258, 3, colors, output_bps);
if (colors > 2)
th->tag[th->ntag-1].val.i = TOFF(th->bps);
FORC4 th->bps[c] = output_bps;
tiff_set (&th->ntag, 259, 3, 1, 1);
tiff_set (&th->ntag, 262, 3, 1, 1 + (colors > 1));
}
tiff_set (&th->ntag, 270, 2, 512, TOFF(th->t_desc));
tiff_set (&th->ntag, 271, 2, 64, TOFF(th->t_make));
tiff_set (&th->ntag, 272, 2, 64, TOFF(th->t_model));
if (full) {
if (oprof) psize = ntohl(oprof[0]);
tiff_set (&th->ntag, 273, 4, 1, sizeof *th + psize);
tiff_set (&th->ntag, 277, 3, 1, colors);
tiff_set (&th->ntag, 278, 4, 1, height);
tiff_set (&th->ntag, 279, 4, 1, height*width*colors*output_bps/8);
} else
tiff_set (&th->ntag, 274, 3, 1, "12435867"[flip]-'0');
tiff_set (&th->ntag, 282, 5, 1, TOFF(th->rat[0]));
tiff_set (&th->ntag, 283, 5, 1, TOFF(th->rat[2]));
tiff_set (&th->ntag, 284, 3, 1, 1);
tiff_set (&th->ntag, 296, 3, 1, 2);
tiff_set (&th->ntag, 305, 2, 32, TOFF(th->soft));
tiff_set (&th->ntag, 306, 2, 20, TOFF(th->date));
tiff_set (&th->ntag, 315, 2, 64, TOFF(th->t_artist));
tiff_set (&th->ntag, 34665, 4, 1, TOFF(th->nexif));
if (psize) tiff_set (&th->ntag, 34675, 7, psize, sizeof *th);
tiff_set (&th->nexif, 33434, 5, 1, TOFF(th->rat[4]));
tiff_set (&th->nexif, 33437, 5, 1, TOFF(th->rat[6]));
tiff_set (&th->nexif, 34855, 3, 1, iso_speed);
tiff_set (&th->nexif, 37386, 5, 1, TOFF(th->rat[8]));
if (gpsdata[1]) {
tiff_set (&th->ntag, 34853, 4, 1, TOFF(th->ngps));
tiff_set (&th->ngps, 0, 1, 4, 0x202);
tiff_set (&th->ngps, 1, 2, 2, gpsdata[29]);
tiff_set (&th->ngps, 2, 5, 3, TOFF(th->gps[0]));
tiff_set (&th->ngps, 3, 2, 2, gpsdata[30]);
tiff_set (&th->ngps, 4, 5, 3, TOFF(th->gps[6]));
tiff_set (&th->ngps, 5, 1, 1, gpsdata[31]);
tiff_set (&th->ngps, 6, 5, 1, TOFF(th->gps[18]));
tiff_set (&th->ngps, 7, 5, 3, TOFF(th->gps[12]));
tiff_set (&th->ngps, 18, 2, 12, TOFF(th->gps[20]));
tiff_set (&th->ngps, 29, 2, 12, TOFF(th->gps[23]));
memcpy (th->gps, gpsdata, sizeof th->gps);
}
th->rat[0] = th->rat[2] = 300;
th->rat[1] = th->rat[3] = 1;
FORC(6) th->rat[4+c] = 1000000;
th->rat[4] *= shutter;
th->rat[6] *= aperture;
th->rat[8] *= focal_len;
strncpy (th->t_desc, desc, 512);
strncpy (th->t_make, make, 64);
strncpy (th->t_model, model, 64);
strcpy (th->soft, "dcraw v"DCRAW_VERSION);
t = localtime (×tamp);
sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d",
t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec);
strncpy (th->t_artist, artist, 64);
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length)
{
ushort exif[5];
struct tiff_hdr th;
fputc (0xff, tfp);
fputc (0xd8, tfp);
if (strcmp (t_humb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, tfp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, tfp);
}
fwrite (t_humb+2, 1, t_humb_length-2, tfp);
}
void CLASS jpeg_thumb()
{
char *thumb;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
jpeg_thumb_writer(ofp,thumb,thumb_length);
free (thumb);
}
#else
void CLASS jpeg_thumb()
{
char *thumb;
ushort exif[5];
struct tiff_hdr th;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
fputc (0xff, ofp);
fputc (0xd8, ofp);
if (strcmp (thumb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, ofp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, ofp);
}
fwrite (thumb+2, 1, thumb_length-2, ofp);
free (thumb);
}
#endif
void CLASS write_ppm_tiff()
{
struct tiff_hdr th;
uchar *ppm;
ushort *ppm2;
int c, row, col, soff, rstep, cstep;
int perc, val, total, t_white=0x2000;
#ifdef LIBRAW_LIBRARY_BUILD
perc = width * height * auto_bright_thr; /* 99th percentile white level */
#else
perc = width * height * 0.01; /* 99th percentile white level */
#endif
if (fuji_width) perc /= 2;
if (!((highlight & ~2) || no_auto_bright))
for (t_white=c=0; c < colors; c++) {
for (val=0x2000, total=0; --val > 32; )
if ((total += histogram[c][val]) > perc) break;
if (t_white < val) t_white = val;
}
gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright);
iheight = height;
iwidth = width;
if (flip & 4) SWAP(height,width);
ppm = (uchar *) calloc (width, colors*output_bps/8);
ppm2 = (ushort *) ppm;
merror (ppm, "write_ppm_tiff()");
if (output_tiff) {
tiff_head (&th, 1);
fwrite (&th, sizeof th, 1, ofp);
if (oprof)
fwrite (oprof, ntohl(oprof[0]), 1, ofp);
} else if (colors > 3)
fprintf (ofp,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
width, height, colors, (1 << output_bps)-1, cdesc);
else
fprintf (ofp, "P%d\n%d %d\n%d\n",
colors/2+5, width, height, (1 << output_bps)-1);
soff = flip_index (0, 0);
cstep = flip_index (0, 1) - soff;
rstep = flip_index (1, 0) - flip_index (0, width);
for (row=0; row < height; row++, soff += rstep) {
for (col=0; col < width; col++, soff += cstep)
if (output_bps == 8)
FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8;
else FORCC ppm2[col*colors+c] = curve[image[soff][c]];
if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
swab ((char*)ppm2, (char*)ppm2, width*colors*2);
fwrite (ppm, colors*output_bps/8, width, ofp);
}
free (ppm);
}
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/good_1605_3 |
crossvul-cpp_data_bad_3618_0 | /***************************************************************************
copyright : (C) 2002 - 2008 by Scott Wheeler
email : wheeler@kde.org
***************************************************************************/
/***************************************************************************
* This library is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License version *
* 2.1 as published by the Free Software Foundation. *
* *
* This library is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public *
* License along with this library; if not, write to the Free Software *
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA *
* 02110-1301 USA *
* *
* Alternatively, this file is available under the Mozilla Public *
* License Version 1.1. You may obtain a copy of the License at *
* http://www.mozilla.org/MPL/ *
***************************************************************************/
#include <ostream>
#include <tstring.h>
#include <tdebug.h>
#include <string.h>
#include "tbytevector.h"
// This is a bit ugly to keep writing over and over again.
// A rather obscure feature of the C++ spec that I hadn't thought of that makes
// working with C libs much more effecient. There's more here:
//
// http://www.informit.com/isapi/product_id~{9C84DAB4-FE6E-49C5-BB0A-FB50331233EA}/content/index.asp
#define DATA(x) (&(x->data[0]))
namespace TagLib {
static const char hexTable[17] = "0123456789abcdef";
static const uint crcTable[256] = {
0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4,
0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
};
/*!
* A templatized KMP find that works both with a ByteVector and a ByteVectorMirror.
*/
template <class Vector>
int vectorFind(const Vector &v, const Vector &pattern, uint offset, int byteAlign)
{
if(pattern.size() > v.size() || offset > v.size() - 1)
return -1;
// Let's go ahead and special case a pattern of size one since that's common
// and easy to make fast.
if(pattern.size() == 1) {
char p = pattern[0];
for(uint i = offset; i < v.size(); i++) {
if(v[i] == p && (i - offset) % byteAlign == 0)
return i;
}
return -1;
}
uchar lastOccurrence[256];
for(uint i = 0; i < 256; ++i)
lastOccurrence[i] = uchar(pattern.size());
for(uint i = 0; i < pattern.size() - 1; ++i)
lastOccurrence[uchar(pattern[i])] = uchar(pattern.size() - i - 1);
for(uint i = pattern.size() - 1 + offset; i < v.size(); i += lastOccurrence[uchar(v.at(i))]) {
int iBuffer = i;
int iPattern = pattern.size() - 1;
while(iPattern >= 0 && v.at(iBuffer) == pattern[iPattern]) {
--iBuffer;
--iPattern;
}
if(-1 == iPattern && (iBuffer + 1 - offset) % byteAlign == 0)
return iBuffer + 1;
}
return -1;
}
/*!
* Wraps the accessors to a ByteVector to make the search algorithm access the
* elements in reverse.
*
* \see vectorFind()
* \see ByteVector::rfind()
*/
class ByteVectorMirror
{
public:
ByteVectorMirror(const ByteVector &source) : v(source) {}
char operator[](int index) const
{
return v[v.size() - index - 1];
}
char at(int index) const
{
return v.at(v.size() - index - 1);
}
ByteVectorMirror mid(uint index, uint length = 0xffffffff) const
{
return length == 0xffffffff ? v.mid(0, index) : v.mid(index - length, length);
}
uint size() const
{
return v.size();
}
int find(const ByteVectorMirror &pattern, uint offset = 0, int byteAlign = 1) const
{
ByteVectorMirror v(*this);
if(offset > 0) {
offset = size() - offset - pattern.size();
if(offset >= size())
offset = 0;
}
const int pos = vectorFind<ByteVectorMirror>(v, pattern, offset, byteAlign);
// If the offset is zero then we need to adjust the location in the search
// to be appropriately reversed. If not we need to account for the fact
// that the recursive call (called from the above line) has already ajusted
// for this but that the normal templatized find above will add the offset
// to the returned value.
//
// This is a little confusing at first if you don't first stop to think
// through the logic involved in the forward search.
if(pos == -1)
return -1;
return size() - pos - pattern.size();
}
private:
const ByteVector &v;
};
template <class T>
T toNumber(const std::vector<char> &data, bool mostSignificantByteFirst)
{
T sum = 0;
if(data.size() <= 0) {
debug("ByteVectorMirror::toNumber<T>() -- data is empty, returning 0");
return sum;
}
uint size = sizeof(T);
uint last = data.size() > size ? size - 1 : data.size() - 1;
for(uint i = 0; i <= last; i++)
sum |= (T) uchar(data[i]) << ((mostSignificantByteFirst ? last - i : i) * 8);
return sum;
}
template <class T>
ByteVector fromNumber(T value, bool mostSignificantByteFirst)
{
int size = sizeof(T);
ByteVector v(size, 0);
for(int i = 0; i < size; i++)
v[i] = uchar(value >> ((mostSignificantByteFirst ? size - 1 - i : i) * 8) & 0xff);
return v;
}
}
using namespace TagLib;
class ByteVector::ByteVectorPrivate : public RefCounter
{
public:
ByteVectorPrivate() : RefCounter(), size(0) {}
ByteVectorPrivate(const std::vector<char> &v) : RefCounter(), data(v), size(v.size()) {}
ByteVectorPrivate(TagLib::uint len, char value) : RefCounter(), data(len, value), size(len) {}
std::vector<char> data;
// std::vector<T>::size() is very slow, so we'll cache the value
uint size;
};
////////////////////////////////////////////////////////////////////////////////
// static members
////////////////////////////////////////////////////////////////////////////////
ByteVector ByteVector::null;
ByteVector ByteVector::fromCString(const char *s, uint length)
{
ByteVector v;
if(length == 0xffffffff)
v.setData(s);
else
v.setData(s, length);
return v;
}
ByteVector ByteVector::fromUInt(uint value, bool mostSignificantByteFirst)
{
return fromNumber<uint>(value, mostSignificantByteFirst);
}
ByteVector ByteVector::fromShort(short value, bool mostSignificantByteFirst)
{
return fromNumber<short>(value, mostSignificantByteFirst);
}
ByteVector ByteVector::fromLongLong(long long value, bool mostSignificantByteFirst)
{
return fromNumber<long long>(value, mostSignificantByteFirst);
}
////////////////////////////////////////////////////////////////////////////////
// public members
////////////////////////////////////////////////////////////////////////////////
ByteVector::ByteVector()
{
d = new ByteVectorPrivate;
}
ByteVector::ByteVector(uint size, char value)
{
d = new ByteVectorPrivate(size, value);
}
ByteVector::ByteVector(const ByteVector &v) : d(v.d)
{
d->ref();
}
ByteVector::ByteVector(char c)
{
d = new ByteVectorPrivate;
d->data.push_back(c);
d->size = 1;
}
ByteVector::ByteVector(const char *data, uint length)
{
d = new ByteVectorPrivate;
setData(data, length);
}
ByteVector::ByteVector(const char *data)
{
d = new ByteVectorPrivate;
setData(data);
}
ByteVector::~ByteVector()
{
if(d->deref())
delete d;
}
ByteVector &ByteVector::setData(const char *data, uint length)
{
detach();
resize(length);
if(length > 0)
::memcpy(DATA(d), data, length);
return *this;
}
ByteVector &ByteVector::setData(const char *data)
{
return setData(data, ::strlen(data));
}
char *ByteVector::data()
{
detach();
return size() > 0 ? DATA(d) : 0;
}
const char *ByteVector::data() const
{
return size() > 0 ? DATA(d) : 0;
}
ByteVector ByteVector::mid(uint index, uint length) const
{
ByteVector v;
if(index > size())
return v;
ConstIterator endIt;
if(length < 0xffffffff && length + index < size())
endIt = d->data.begin() + index + length;
else
endIt = d->data.end();
v.d->data.insert(v.d->data.begin(), ConstIterator(d->data.begin() + index), endIt);
v.d->size = v.d->data.size();
return v;
}
char ByteVector::at(uint index) const
{
return index < size() ? d->data[index] : 0;
}
int ByteVector::find(const ByteVector &pattern, uint offset, int byteAlign) const
{
return vectorFind<ByteVector>(*this, pattern, offset, byteAlign);
}
int ByteVector::rfind(const ByteVector &pattern, uint offset, int byteAlign) const
{
// Ok, this is a little goofy, but pretty cool after it sinks in. Instead of
// reversing the find method's Boyer-Moore search algorithm I created a "mirror"
// for a ByteVector to reverse the behavior of the accessors.
ByteVectorMirror v(*this);
ByteVectorMirror p(pattern);
return v.find(p, offset, byteAlign);
}
bool ByteVector::containsAt(const ByteVector &pattern, uint offset, uint patternOffset, uint patternLength) const
{
if(pattern.size() < patternLength)
patternLength = pattern.size();
// do some sanity checking -- all of these things are needed for the search to be valid
if(patternLength > size() || offset >= size() || patternOffset >= pattern.size() || patternLength == 0)
return false;
// loop through looking for a mismatch
for(uint i = 0; i < patternLength - patternOffset; i++) {
if(at(i + offset) != pattern[i + patternOffset])
return false;
}
return true;
}
bool ByteVector::startsWith(const ByteVector &pattern) const
{
return containsAt(pattern, 0);
}
bool ByteVector::endsWith(const ByteVector &pattern) const
{
return containsAt(pattern, size() - pattern.size());
}
ByteVector &ByteVector::replace(const ByteVector &pattern, const ByteVector &with)
{
if(pattern.size() == 0 || pattern.size() > size())
return *this;
const uint withSize = with.size();
const uint patternSize = pattern.size();
int offset = 0;
if(withSize == patternSize) {
// I think this case might be common enough to optimize it
detach();
offset = find(pattern);
while(offset >= 0) {
::memcpy(data() + offset, with.data(), withSize);
offset = find(pattern, offset + withSize);
}
return *this;
}
// calculate new size:
uint newSize = 0;
for(;;) {
int next = find(pattern, offset);
if(next < 0) {
if(offset == 0)
// pattern not found, do nothing:
return *this;
newSize += size() - offset;
break;
}
newSize += (next - offset) + withSize;
offset = next + patternSize;
}
// new private data of appropriate size:
ByteVectorPrivate *newData = new ByteVectorPrivate(newSize, 0);
char *target = DATA(newData);
const char *source = data();
// copy modified data into new private data:
offset = 0;
for(;;) {
int next = find(pattern, offset);
if(next < 0) {
::memcpy(target, source + offset, size() - offset);
break;
}
int chunkSize = next - offset;
::memcpy(target, source + offset, chunkSize);
target += chunkSize;
::memcpy(target, with.data(), withSize);
target += withSize;
offset += chunkSize + patternSize;
}
// replace private data:
if(d->deref())
delete d;
d = newData;
return *this;
}
int ByteVector::endsWithPartialMatch(const ByteVector &pattern) const
{
if(pattern.size() > size())
return -1;
const int startIndex = size() - pattern.size();
// try to match the last n-1 bytes from the vector (where n is the pattern
// size) -- continue trying to match n-2, n-3...1 bytes
for(uint i = 1; i < pattern.size(); i++) {
if(containsAt(pattern, startIndex + i, 0, pattern.size() - i))
return startIndex + i;
}
return -1;
}
ByteVector &ByteVector::append(const ByteVector &v)
{
if(v.d->size == 0)
return *this; // Simply return if appending nothing.
detach();
uint originalSize = d->size;
resize(d->size + v.d->size);
::memcpy(DATA(d) + originalSize, DATA(v.d), v.size());
return *this;
}
ByteVector &ByteVector::clear()
{
detach();
d->data.clear();
d->size = 0;
return *this;
}
TagLib::uint ByteVector::size() const
{
return d->size;
}
ByteVector &ByteVector::resize(uint size, char padding)
{
if(d->size < size) {
d->data.reserve(size);
d->data.insert(d->data.end(), size - d->size, padding);
}
else
d->data.erase(d->data.begin() + size, d->data.end());
d->size = size;
return *this;
}
ByteVector::Iterator ByteVector::begin()
{
return d->data.begin();
}
ByteVector::ConstIterator ByteVector::begin() const
{
return d->data.begin();
}
ByteVector::Iterator ByteVector::end()
{
return d->data.end();
}
ByteVector::ConstIterator ByteVector::end() const
{
return d->data.end();
}
bool ByteVector::isNull() const
{
return d == null.d;
}
bool ByteVector::isEmpty() const
{
return d->data.size() == 0;
}
TagLib::uint ByteVector::checksum() const
{
uint sum = 0;
for(ByteVector::ConstIterator it = begin(); it != end(); ++it)
sum = (sum << 8) ^ crcTable[((sum >> 24) & 0xff) ^ uchar(*it)];
return sum;
}
TagLib::uint ByteVector::toUInt(bool mostSignificantByteFirst) const
{
return toNumber<uint>(d->data, mostSignificantByteFirst);
}
short ByteVector::toShort(bool mostSignificantByteFirst) const
{
return toNumber<unsigned short>(d->data, mostSignificantByteFirst);
}
unsigned short ByteVector::toUShort(bool mostSignificantByteFirst) const
{
return toNumber<unsigned short>(d->data, mostSignificantByteFirst);
}
long long ByteVector::toLongLong(bool mostSignificantByteFirst) const
{
return toNumber<unsigned long long>(d->data, mostSignificantByteFirst);
}
const char &ByteVector::operator[](int index) const
{
return d->data[index];
}
char &ByteVector::operator[](int index)
{
detach();
return d->data[index];
}
bool ByteVector::operator==(const ByteVector &v) const
{
if(d->size != v.d->size)
return false;
return ::memcmp(data(), v.data(), size()) == 0;
}
bool ByteVector::operator!=(const ByteVector &v) const
{
return !operator==(v);
}
bool ByteVector::operator==(const char *s) const
{
if(d->size != ::strlen(s))
return false;
return ::memcmp(data(), s, d->size) == 0;
}
bool ByteVector::operator!=(const char *s) const
{
return !operator==(s);
}
bool ByteVector::operator<(const ByteVector &v) const
{
int result = ::memcmp(data(), v.data(), d->size < v.d->size ? d->size : v.d->size);
if(result != 0)
return result < 0;
else
return size() < v.size();
}
bool ByteVector::operator>(const ByteVector &v) const
{
return v < *this;
}
ByteVector ByteVector::operator+(const ByteVector &v) const
{
ByteVector sum(*this);
sum.append(v);
return sum;
}
ByteVector &ByteVector::operator=(const ByteVector &v)
{
if(&v == this)
return *this;
if(d->deref())
delete d;
d = v.d;
d->ref();
return *this;
}
ByteVector &ByteVector::operator=(char c)
{
*this = ByteVector(c);
return *this;
}
ByteVector &ByteVector::operator=(const char *data)
{
*this = ByteVector(data);
return *this;
}
ByteVector ByteVector::toHex() const
{
ByteVector encoded(size() * 2);
uint j = 0;
for(uint i = 0; i < size(); i++) {
unsigned char c = d->data[i];
encoded[j++] = hexTable[(c >> 4) & 0x0F];
encoded[j++] = hexTable[(c ) & 0x0F];
}
return encoded;
}
////////////////////////////////////////////////////////////////////////////////
// protected members
////////////////////////////////////////////////////////////////////////////////
void ByteVector::detach()
{
if(d->count() > 1) {
d->deref();
d = new ByteVectorPrivate(d->data);
}
}
////////////////////////////////////////////////////////////////////////////////
// related functions
////////////////////////////////////////////////////////////////////////////////
std::ostream &operator<<(std::ostream &s, const ByteVector &v)
{
for(TagLib::uint i = 0; i < v.size(); i++)
s << v[i];
return s;
}
| ./CrossVul/dataset_final_sorted/CWE-189/cpp/bad_3618_0 |
crossvul-cpp_data_good_2218_0 | /*
* Routines for driver control interface
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/threads.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/info.h>
#include <sound/control.h>
/* max number of user-defined controls */
#define MAX_USER_CONTROLS 32
#define MAX_CONTROL_COUNT 1028
struct snd_kctl_ioctl {
struct list_head list; /* list of all ioctls */
snd_kctl_ioctl_func_t fioctl;
};
static DECLARE_RWSEM(snd_ioctl_rwsem);
static LIST_HEAD(snd_control_ioctls);
#ifdef CONFIG_COMPAT
static LIST_HEAD(snd_control_compat_ioctls);
#endif
static int snd_ctl_open(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL);
if (!card) {
err = -ENODEV;
goto __error1;
}
err = snd_card_file_add(card, file);
if (err < 0) {
err = -ENODEV;
goto __error1;
}
if (!try_module_get(card->module)) {
err = -EFAULT;
goto __error2;
}
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (ctl == NULL) {
err = -ENOMEM;
goto __error;
}
INIT_LIST_HEAD(&ctl->events);
init_waitqueue_head(&ctl->change_sleep);
spin_lock_init(&ctl->read_lock);
ctl->card = card;
ctl->prefer_pcm_subdevice = -1;
ctl->prefer_rawmidi_subdevice = -1;
ctl->pid = get_pid(task_pid(current));
file->private_data = ctl;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_add_tail(&ctl->list, &card->ctl_files);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
snd_card_unref(card);
return 0;
__error:
module_put(card->module);
__error2:
snd_card_file_remove(card, file);
__error1:
if (card)
snd_card_unref(card);
return err;
}
static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl)
{
unsigned long flags;
struct snd_kctl_event *cread;
spin_lock_irqsave(&ctl->read_lock, flags);
while (!list_empty(&ctl->events)) {
cread = snd_kctl_event(ctl->events.next);
list_del(&cread->list);
kfree(cread);
}
spin_unlock_irqrestore(&ctl->read_lock, flags);
}
static int snd_ctl_release(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
struct snd_kcontrol *control;
unsigned int idx;
ctl = file->private_data;
file->private_data = NULL;
card = ctl->card;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_del(&ctl->list);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
down_write(&card->controls_rwsem);
list_for_each_entry(control, &card->controls, list)
for (idx = 0; idx < control->count; idx++)
if (control->vd[idx].owner == ctl)
control->vd[idx].owner = NULL;
up_write(&card->controls_rwsem);
snd_ctl_empty_read_queue(ctl);
put_pid(ctl->pid);
kfree(ctl);
module_put(card->module);
snd_card_file_remove(card, file);
return 0;
}
void snd_ctl_notify(struct snd_card *card, unsigned int mask,
struct snd_ctl_elem_id *id)
{
unsigned long flags;
struct snd_ctl_file *ctl;
struct snd_kctl_event *ev;
if (snd_BUG_ON(!card || !id))
return;
read_lock(&card->ctl_files_rwlock);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
card->mixer_oss_change_count++;
#endif
list_for_each_entry(ctl, &card->ctl_files, list) {
if (!ctl->subscribed)
continue;
spin_lock_irqsave(&ctl->read_lock, flags);
list_for_each_entry(ev, &ctl->events, list) {
if (ev->id.numid == id->numid) {
ev->mask |= mask;
goto _found;
}
}
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (ev) {
ev->id = *id;
ev->mask = mask;
list_add_tail(&ev->list, &ctl->events);
} else {
dev_err(card->dev, "No memory available to allocate event\n");
}
_found:
wake_up(&ctl->change_sleep);
spin_unlock_irqrestore(&ctl->read_lock, flags);
kill_fasync(&ctl->fasync, SIGIO, POLL_IN);
}
read_unlock(&card->ctl_files_rwlock);
}
EXPORT_SYMBOL(snd_ctl_notify);
/**
* snd_ctl_new - create a control instance from the template
* @control: the control template
* @access: the default control access
*
* Allocates a new struct snd_kcontrol instance and copies the given template
* to the new instance. It does not copy volatile data (access).
*
* Return: The pointer of the new instance, or %NULL on failure.
*/
static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
unsigned int access)
{
struct snd_kcontrol *kctl;
unsigned int idx;
if (snd_BUG_ON(!control || !control->count))
return NULL;
if (control->count > MAX_CONTROL_COUNT)
return NULL;
kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
if (kctl == NULL) {
pr_err("ALSA: Cannot allocate control instance\n");
return NULL;
}
*kctl = *control;
for (idx = 0; idx < kctl->count; idx++)
kctl->vd[idx].access = access;
return kctl;
}
/**
* snd_ctl_new1 - create a control instance from the template
* @ncontrol: the initialization record
* @private_data: the private data to set
*
* Allocates a new struct snd_kcontrol instance and initialize from the given
* template. When the access field of ncontrol is 0, it's assumed as
* READWRITE access. When the count field is 0, it's assumes as one.
*
* Return: The pointer of the newly generated instance, or %NULL on failure.
*/
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
void *private_data)
{
struct snd_kcontrol kctl;
unsigned int access;
if (snd_BUG_ON(!ncontrol || !ncontrol->info))
return NULL;
memset(&kctl, 0, sizeof(kctl));
kctl.id.iface = ncontrol->iface;
kctl.id.device = ncontrol->device;
kctl.id.subdevice = ncontrol->subdevice;
if (ncontrol->name) {
strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name));
if (strcmp(ncontrol->name, kctl.id.name) != 0)
pr_warn("ALSA: Control name '%s' truncated to '%s'\n",
ncontrol->name, kctl.id.name);
}
kctl.id.index = ncontrol->index;
kctl.count = ncontrol->count ? ncontrol->count : 1;
access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_VOLATILE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE|
SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND|
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK));
kctl.info = ncontrol->info;
kctl.get = ncontrol->get;
kctl.put = ncontrol->put;
kctl.tlv.p = ncontrol->tlv.p;
kctl.private_value = ncontrol->private_value;
kctl.private_data = private_data;
return snd_ctl_new(&kctl, access);
}
EXPORT_SYMBOL(snd_ctl_new1);
/**
* snd_ctl_free_one - release the control instance
* @kcontrol: the control instance
*
* Releases the control instance created via snd_ctl_new()
* or snd_ctl_new1().
* Don't call this after the control was added to the card.
*/
void snd_ctl_free_one(struct snd_kcontrol *kcontrol)
{
if (kcontrol) {
if (kcontrol->private_free)
kcontrol->private_free(kcontrol);
kfree(kcontrol);
}
}
EXPORT_SYMBOL(snd_ctl_free_one);
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
unsigned int count)
{
struct snd_kcontrol *kctl;
/* Make sure that the ids assigned to the control do not wrap around */
if (card->last_numid >= UINT_MAX - count)
card->last_numid = 0;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
card->last_numid = kctl->id.numid + kctl->count - 1;
return true;
}
}
return false;
}
static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
{
unsigned int iter = 100000;
while (snd_ctl_remove_numid_conflict(card, count)) {
if (--iter == 0) {
/* this situation is very unlikely */
dev_err(card->dev, "unable to allocate new control numid\n");
return -ENOMEM;
}
}
return 0;
}
/**
* snd_ctl_add - add the control instance to the card
* @card: the card instance
* @kcontrol: the control instance to add
*
* Adds the control instance created via snd_ctl_new() or
* snd_ctl_new1() to the given card. Assigns also an unique
* numid used for fast search.
*
* It frees automatically the control which cannot be added.
*
* Return: Zero if successful, or a negative error code on failure.
*
*/
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
unsigned int count;
int err = -EINVAL;
if (! kcontrol)
return err;
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
if (id.index > UINT_MAX - kcontrol->count)
goto error;
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
id.iface,
id.device,
id.subdevice,
id.name,
id.index);
err = -EBUSY;
goto error;
}
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
err = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return err;
}
EXPORT_SYMBOL(snd_ctl_add);
/**
* snd_ctl_replace - replace the control instance of the card
* @card: the card instance
* @kcontrol: the control instance to replace
* @add_on_replace: add the control if not already added
*
* Replaces the given control. If the given control does not exist
* and the add_on_replace flag is set, the control is added. If the
* control exists, it is destroyed first.
*
* It frees automatically the control which cannot be added or replaced.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
bool add_on_replace)
{
struct snd_ctl_elem_id id;
unsigned int count;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
if (!kcontrol)
return -EINVAL;
if (snd_BUG_ON(!card || !kcontrol->info)) {
ret = -EINVAL;
goto error;
}
id = kcontrol->id;
down_write(&card->controls_rwsem);
old = snd_ctl_find_id(card, &id);
if (!old) {
if (add_on_replace)
goto add;
up_write(&card->controls_rwsem);
ret = -EINVAL;
goto error;
}
ret = snd_ctl_remove(card, old);
if (ret < 0) {
up_write(&card->controls_rwsem);
goto error;
}
add:
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
ret = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return ret;
}
EXPORT_SYMBOL(snd_ctl_replace);
/**
* snd_ctl_remove - remove the control from the card and release it
* @card: the card instance
* @kcontrol: the control instance to remove
*
* Removes the control from the card and then releases the instance.
* You don't need to call snd_ctl_free_one(). You must be in
* the write lock - down_write(&card->controls_rwsem).
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
if (snd_BUG_ON(!card || !kcontrol))
return -EINVAL;
list_del(&kcontrol->list);
card->controls_count -= kcontrol->count;
id = kcontrol->id;
for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id);
snd_ctl_free_one(kcontrol);
return 0;
}
EXPORT_SYMBOL(snd_ctl_remove);
/**
* snd_ctl_remove_id - remove the control of the given id and release it
* @card: the card instance
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
ret = snd_ctl_remove(card, kctl);
up_write(&card->controls_rwsem);
return ret;
}
EXPORT_SYMBOL(snd_ctl_remove_id);
/**
* snd_ctl_remove_user_ctl - remove and release the unlocked user control
* @file: active control handle
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
struct snd_ctl_elem_id *id)
{
struct snd_card *card = file->card;
struct snd_kcontrol *kctl;
int idx, ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto error;
}
if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
ret = -EINVAL;
goto error;
}
for (idx = 0; idx < kctl->count; idx++)
if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
ret = -EBUSY;
goto error;
}
ret = snd_ctl_remove(card, kctl);
if (ret < 0)
goto error;
card->user_ctl_count--;
error:
up_write(&card->controls_rwsem);
return ret;
}
/**
* snd_ctl_activate_id - activate/inactivate the control of the given id
* @card: the card instance
* @id: the control id to activate/inactivate
* @active: non-zero to activate
*
* Finds the control instance with the given id, and activate or
* inactivate the control together with notification, if changed.
*
* Return: 0 if unchanged, 1 if changed, or a negative error code on failure.
*/
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
int active)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto unlock;
}
index_offset = snd_ctl_get_ioff(kctl, &kctl->id);
vd = &kctl->vd[index_offset];
ret = 0;
if (active) {
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE))
goto unlock;
vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
} else {
if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)
goto unlock;
vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
}
ret = 1;
unlock:
up_write(&card->controls_rwsem);
if (ret > 0)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id);
return ret;
}
EXPORT_SYMBOL_GPL(snd_ctl_activate_id);
/**
* snd_ctl_rename_id - replace the id of a control on the card
* @card: the card instance
* @src_id: the old id
* @dst_id: the new id
*
* Finds the control with the old id from the card, and replaces the
* id with the new one.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
struct snd_ctl_elem_id *dst_id)
{
struct snd_kcontrol *kctl;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, src_id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
kctl->id = *dst_id;
kctl->id.numid = card->last_numid + 1;
card->last_numid += kctl->count;
up_write(&card->controls_rwsem);
return 0;
}
EXPORT_SYMBOL(snd_ctl_rename_id);
/**
* snd_ctl_find_numid - find the control instance with the given number-id
* @card: the card instance
* @numid: the number-id to search
*
* Finds the control instance with the given number-id from the card.
*
* The caller must down card->controls_rwsem before calling this function
* (if the race condition can happen).
*
* Return: The pointer of the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !numid))
return NULL;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid)
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_numid);
/**
* snd_ctl_find_id - find the control instance with the given id
* @card: the card instance
* @id: the id to search
*
* Finds the control instance with the given id from the card.
*
* The caller must down card->controls_rwsem before calling this function
* (if the race condition can happen).
*
* Return: The pointer of the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card,
struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !id))
return NULL;
if (id->numid != 0)
return snd_ctl_find_numid(card, id->numid);
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.iface != id->iface)
continue;
if (kctl->id.device != id->device)
continue;
if (kctl->id.subdevice != id->subdevice)
continue;
if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name)))
continue;
if (kctl->id.index > id->index)
continue;
if (kctl->id.index + kctl->count <= id->index)
continue;
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_id);
static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl,
unsigned int cmd, void __user *arg)
{
struct snd_ctl_card_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
down_read(&snd_ioctl_rwsem);
info->card = card->number;
strlcpy(info->id, card->id, sizeof(info->id));
strlcpy(info->driver, card->driver, sizeof(info->driver));
strlcpy(info->name, card->shortname, sizeof(info->name));
strlcpy(info->longname, card->longname, sizeof(info->longname));
strlcpy(info->mixername, card->mixername, sizeof(info->mixername));
strlcpy(info->components, card->components, sizeof(info->components));
up_read(&snd_ioctl_rwsem);
if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) {
kfree(info);
return -EFAULT;
}
kfree(info);
return 0;
}
static int snd_ctl_elem_list(struct snd_card *card,
struct snd_ctl_elem_list __user *_list)
{
struct list_head *plist;
struct snd_ctl_elem_list list;
struct snd_kcontrol *kctl;
struct snd_ctl_elem_id *dst, *id;
unsigned int offset, space, jidx;
if (copy_from_user(&list, _list, sizeof(list)))
return -EFAULT;
offset = list.offset;
space = list.space;
/* try limit maximum space */
if (space > 16384)
return -ENOMEM;
if (space > 0) {
/* allocate temporary buffer for atomic operation */
dst = vmalloc(space * sizeof(struct snd_ctl_elem_id));
if (dst == NULL)
return -ENOMEM;
down_read(&card->controls_rwsem);
list.count = card->controls_count;
plist = card->controls.next;
while (plist != &card->controls) {
if (offset == 0)
break;
kctl = snd_kcontrol(plist);
if (offset < kctl->count)
break;
offset -= kctl->count;
plist = plist->next;
}
list.used = 0;
id = dst;
while (space > 0 && plist != &card->controls) {
kctl = snd_kcontrol(plist);
for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) {
snd_ctl_build_ioff(id, kctl, jidx);
id++;
space--;
list.used++;
}
plist = plist->next;
offset = 0;
}
up_read(&card->controls_rwsem);
if (list.used > 0 &&
copy_to_user(list.pids, dst,
list.used * sizeof(struct snd_ctl_elem_id))) {
vfree(dst);
return -EFAULT;
}
vfree(dst);
} else {
down_read(&card->controls_rwsem);
list.count = card->controls_count;
up_read(&card->controls_rwsem);
}
if (copy_to_user(_list, &list, sizeof(list)))
return -EFAULT;
return 0;
}
static int snd_ctl_elem_info(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info *info)
{
struct snd_card *card = ctl->card;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &info->id);
if (kctl == NULL) {
up_read(&card->controls_rwsem);
return -ENOENT;
}
#ifdef CONFIG_SND_DEBUG
info->access = 0;
#endif
result = kctl->info(kctl, info);
if (result >= 0) {
snd_BUG_ON(info->access);
index_offset = snd_ctl_get_ioff(kctl, &info->id);
vd = &kctl->vd[index_offset];
snd_ctl_build_ioff(&info->id, kctl, index_offset);
info->access = vd->access;
if (vd->owner) {
info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK;
if (vd->owner == ctl)
info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER;
info->owner = pid_vnr(vd->owner->pid);
} else {
info->owner = -1;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info __user *_info)
{
struct snd_ctl_elem_info info;
int result;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
snd_power_lock(ctl->card);
result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_info(ctl, &info);
snd_power_unlock(ctl->card);
if (result >= 0)
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
return result;
}
static int snd_ctl_elem_read(struct snd_card *card,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) &&
kctl->get != NULL) {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->get(kctl, control);
} else
result = -EPERM;
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_read_user(struct snd_card *card,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_read(card, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) ||
kctl->put == NULL ||
(file && vd->owner && vd->owner != file)) {
result = -EPERM;
} else {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->put(kctl, control);
}
if (result > 0) {
struct snd_ctl_elem_id id = control->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
return 0;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
struct snd_card *card;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
card = file->card;
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_write(card, file, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_lock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner != NULL)
result = -EBUSY;
else {
vd->owner = file;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner == NULL)
result = -EINVAL;
else if (vd->owner != file)
result = -EPERM;
else {
vd->owner = NULL;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
struct user_element {
struct snd_ctl_elem_info info;
struct snd_card *card;
void *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
unsigned long tlv_data_size; /* TLV data size */
void *priv_data; /* private data (like strings for enumerated type) */
};
static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
*uinfo = ue->info;
return 0;
}
static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
const char *names;
unsigned int item;
item = uinfo->value.enumerated.item;
*uinfo = ue->info;
item = min(item, uinfo->value.enumerated.items - 1);
uinfo->value.enumerated.item = item;
names = ue->priv_data;
for (; item > 0; --item)
names += strlen(names) + 1;
strcpy(uinfo->value.enumerated.name, names);
return 0;
}
static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int change;
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
if (change)
memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
mutex_unlock(&ue->card->user_ctl_lock);
} else {
int ret = 0;
mutex_lock(&ue->card->user_ctl_lock);
if (!ue->tlv_data_size || !ue->tlv_data) {
ret = -ENXIO;
goto err_unlock;
}
if (size < ue->tlv_data_size) {
ret = -ENOSPC;
goto err_unlock;
}
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
ret = -EFAULT;
err_unlock:
mutex_unlock(&ue->card->user_ctl_lock);
if (ret)
return ret;
}
return change;
}
static int snd_ctl_elem_init_enum_names(struct user_element *ue)
{
char *names, *p;
size_t buf_len, name_len;
unsigned int i;
const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr;
if (ue->info.value.enumerated.names_length > 64 * 1024)
return -EINVAL;
names = memdup_user((const void __user *)user_ptrval,
ue->info.value.enumerated.names_length);
if (IS_ERR(names))
return PTR_ERR(names);
/* check that there are enough valid names */
buf_len = ue->info.value.enumerated.names_length;
p = names;
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
kfree(names);
return -EINVAL;
}
p += name_len + 1;
buf_len -= name_len + 1;
}
ue->priv_data = names;
ue->info.value.enumerated.names_ptr = 0;
return 0;
}
static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
{
struct user_element *ue = kcontrol->private_data;
kfree(ue->tlv_data);
kfree(ue->priv_data);
kfree(ue);
}
static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
if (replace) {
err = snd_ctl_remove_user_ctl(file, &info->id);
if (err)
return err;
}
if (card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
}
static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
struct snd_ctl_elem_info __user *_info, int replace)
{
struct snd_ctl_elem_info info;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
return snd_ctl_elem_add(file, &info, replace);
}
static int snd_ctl_elem_remove(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_ctl_elem_id id;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
return snd_ctl_remove_user_ctl(file, &id);
}
static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr)
{
int subscribe;
if (get_user(subscribe, ptr))
return -EFAULT;
if (subscribe < 0) {
subscribe = file->subscribed;
if (put_user(subscribe, ptr))
return -EFAULT;
return 0;
}
if (subscribe) {
file->subscribed = 1;
return 0;
} else if (file->subscribed) {
snd_ctl_empty_read_queue(file);
file->subscribed = 0;
}
return 0;
}
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
struct snd_ctl_tlv __user *_tlv,
int op_flag)
{
struct snd_card *card = file->card;
struct snd_ctl_tlv tlv;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int len;
int err = 0;
if (copy_from_user(&tlv, _tlv, sizeof(tlv)))
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
err = -ENOENT;
goto __kctl_end;
}
if (kctl->tlv.p == NULL) {
err = -ENXIO;
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
(op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
(op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
if (vd->owner != NULL && vd->owner != file) {
err = -EPERM;
goto __kctl_end;
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
if (op_flag) {
err = -ENXIO;
goto __kctl_end;
}
len = kctl->tlv.p[1] + 2 * sizeof(unsigned int);
if (tlv.length < len) {
err = -ENOMEM;
goto __kctl_end;
}
if (copy_to_user(_tlv->tlv, kctl->tlv.p, len))
err = -EFAULT;
}
__kctl_end:
up_read(&card->controls_rwsem);
return err;
}
static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_ctl_file *ctl;
struct snd_card *card;
struct snd_kctl_ioctl *p;
void __user *argp = (void __user *)arg;
int __user *ip = argp;
int err;
ctl = file->private_data;
card = ctl->card;
if (snd_BUG_ON(!card))
return -ENXIO;
switch (cmd) {
case SNDRV_CTL_IOCTL_PVERSION:
return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0;
case SNDRV_CTL_IOCTL_CARD_INFO:
return snd_ctl_card_info(card, ctl, cmd, argp);
case SNDRV_CTL_IOCTL_ELEM_LIST:
return snd_ctl_elem_list(card, argp);
case SNDRV_CTL_IOCTL_ELEM_INFO:
return snd_ctl_elem_info_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_READ:
return snd_ctl_elem_read_user(card, argp);
case SNDRV_CTL_IOCTL_ELEM_WRITE:
return snd_ctl_elem_write_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_LOCK:
return snd_ctl_elem_lock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_UNLOCK:
return snd_ctl_elem_unlock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_ADD:
return snd_ctl_elem_add_user(ctl, argp, 0);
case SNDRV_CTL_IOCTL_ELEM_REPLACE:
return snd_ctl_elem_add_user(ctl, argp, 1);
case SNDRV_CTL_IOCTL_ELEM_REMOVE:
return snd_ctl_elem_remove(ctl, argp);
case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS:
return snd_ctl_subscribe_events(ctl, ip);
case SNDRV_CTL_IOCTL_TLV_READ:
return snd_ctl_tlv_ioctl(ctl, argp, 0);
case SNDRV_CTL_IOCTL_TLV_WRITE:
return snd_ctl_tlv_ioctl(ctl, argp, 1);
case SNDRV_CTL_IOCTL_TLV_COMMAND:
return snd_ctl_tlv_ioctl(ctl, argp, -1);
case SNDRV_CTL_IOCTL_POWER:
return -ENOPROTOOPT;
case SNDRV_CTL_IOCTL_POWER_STATE:
#ifdef CONFIG_PM
return put_user(card->power_state, ip) ? -EFAULT : 0;
#else
return put_user(SNDRV_CTL_POWER_D0, ip) ? -EFAULT : 0;
#endif
}
down_read(&snd_ioctl_rwsem);
list_for_each_entry(p, &snd_control_ioctls, list) {
err = p->fioctl(card, ctl, cmd, arg);
if (err != -ENOIOCTLCMD) {
up_read(&snd_ioctl_rwsem);
return err;
}
}
up_read(&snd_ioctl_rwsem);
dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd);
return -ENOTTY;
}
static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
size_t count, loff_t * offset)
{
struct snd_ctl_file *ctl;
int err = 0;
ssize_t result = 0;
ctl = file->private_data;
if (snd_BUG_ON(!ctl || !ctl->card))
return -ENXIO;
if (!ctl->subscribed)
return -EBADFD;
if (count < sizeof(struct snd_ctl_event))
return -EINVAL;
spin_lock_irq(&ctl->read_lock);
while (count >= sizeof(struct snd_ctl_event)) {
struct snd_ctl_event ev;
struct snd_kctl_event *kev;
while (list_empty(&ctl->events)) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto __end_lock;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&ctl->change_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&ctl->read_lock);
schedule();
remove_wait_queue(&ctl->change_sleep, &wait);
if (ctl->card->shutdown)
return -ENODEV;
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&ctl->read_lock);
}
kev = snd_kctl_event(ctl->events.next);
ev.type = SNDRV_CTL_EVENT_ELEM;
ev.data.elem.mask = kev->mask;
ev.data.elem.id = kev->id;
list_del(&kev->list);
spin_unlock_irq(&ctl->read_lock);
kfree(kev);
if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) {
err = -EFAULT;
goto __end;
}
spin_lock_irq(&ctl->read_lock);
buffer += sizeof(struct snd_ctl_event);
count -= sizeof(struct snd_ctl_event);
result += sizeof(struct snd_ctl_event);
}
__end_lock:
spin_unlock_irq(&ctl->read_lock);
__end:
return result > 0 ? result : err;
}
static unsigned int snd_ctl_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_ctl_file *ctl;
ctl = file->private_data;
if (!ctl->subscribed)
return 0;
poll_wait(file, &ctl->change_sleep, wait);
mask = 0;
if (!list_empty(&ctl->events))
mask |= POLLIN | POLLRDNORM;
return mask;
}
/*
* register the device-specific control-ioctls.
* called from each device manager like pcm.c, hwdep.c, etc.
*/
static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists)
{
struct snd_kctl_ioctl *pn;
pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL);
if (pn == NULL)
return -ENOMEM;
pn->fioctl = fcn;
down_write(&snd_ioctl_rwsem);
list_add_tail(&pn->list, lists);
up_write(&snd_ioctl_rwsem);
return 0;
}
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl_compat);
#endif
/*
* de-register the device-specific control-ioctls.
*/
static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn,
struct list_head *lists)
{
struct snd_kctl_ioctl *p;
if (snd_BUG_ON(!fcn))
return -EINVAL;
down_write(&snd_ioctl_rwsem);
list_for_each_entry(p, lists, list) {
if (p->fioctl == fcn) {
list_del(&p->list);
up_write(&snd_ioctl_rwsem);
kfree(p);
return 0;
}
}
up_write(&snd_ioctl_rwsem);
snd_BUG();
return -EINVAL;
}
int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat);
#endif
static int snd_ctl_fasync(int fd, struct file * file, int on)
{
struct snd_ctl_file *ctl;
ctl = file->private_data;
return fasync_helper(fd, file, on, &ctl->fasync);
}
/*
* ioctl32 compat
*/
#ifdef CONFIG_COMPAT
#include "control_compat.c"
#else
#define snd_ctl_ioctl_compat NULL
#endif
/*
* INIT PART
*/
static const struct file_operations snd_ctl_f_ops =
{
.owner = THIS_MODULE,
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
.llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
.fasync = snd_ctl_fasync,
};
/*
* registration of the control device
*/
static int snd_ctl_dev_register(struct snd_device *device)
{
struct snd_card *card = device->device_data;
int err, cardnum;
char name[16];
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
sprintf(name, "controlC%i", cardnum);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1,
&snd_ctl_f_ops, card, name)) < 0)
return err;
return 0;
}
/*
* disconnection of the control device
*/
static int snd_ctl_dev_disconnect(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_ctl_file *ctl;
int err, cardnum;
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
read_lock(&card->ctl_files_rwlock);
list_for_each_entry(ctl, &card->ctl_files, list) {
wake_up(&ctl->change_sleep);
kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
}
read_unlock(&card->ctl_files_rwlock);
if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL,
card, -1)) < 0)
return err;
return 0;
}
/*
* free all controls
*/
static int snd_ctl_dev_free(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_kcontrol *control;
down_write(&card->controls_rwsem);
while (!list_empty(&card->controls)) {
control = snd_kcontrol(card->controls.next);
snd_ctl_remove(card, control);
}
up_write(&card->controls_rwsem);
return 0;
}
/*
* create control core:
* called from init.c
*/
int snd_ctl_create(struct snd_card *card)
{
static struct snd_device_ops ops = {
.dev_free = snd_ctl_dev_free,
.dev_register = snd_ctl_dev_register,
.dev_disconnect = snd_ctl_dev_disconnect,
};
if (snd_BUG_ON(!card))
return -ENXIO;
return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
}
/*
* Frequently used control callbacks/helpers
*/
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_mono_info);
int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
/**
* snd_ctl_enum_info - fills the info structure for an enumerated control
* @info: the structure to be filled
* @channels: the number of the control's channels; often one
* @items: the number of control values; also the size of @names
* @names: an array containing the names of all control values
*
* Sets all required fields in @info to their appropriate values.
* If the control's accessibility is not the default (readable and writable),
* the caller has to fill @info->access.
*
* Return: Zero.
*/
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
unsigned int items, const char *const names[])
{
info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
info->count = channels;
info->value.enumerated.items = items;
if (info->value.enumerated.item >= items)
info->value.enumerated.item = items - 1;
strlcpy(info->value.enumerated.name,
names[info->value.enumerated.item],
sizeof(info->value.enumerated.name));
return 0;
}
EXPORT_SYMBOL(snd_ctl_enum_info);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2218_0 |
crossvul-cpp_data_bad_3770_0 | /*
* TCP Illinois congestion control.
* Home page:
* http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
*
* The algorithm is described in:
* "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
* for High-Speed Networks"
* http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
*
* Implemented from description in paper and ns-2 simulation.
* Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>
#define ALPHA_SHIFT 7
#define ALPHA_SCALE (1u<<ALPHA_SHIFT)
#define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */
#define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */
#define ALPHA_BASE ALPHA_SCALE /* 1.0 */
#define U32_MAX ((u32)~0U)
#define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */
#define BETA_SHIFT 6
#define BETA_SCALE (1u<<BETA_SHIFT)
#define BETA_MIN (BETA_SCALE/8) /* 0.125 */
#define BETA_MAX (BETA_SCALE/2) /* 0.5 */
#define BETA_BASE BETA_MAX
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");
static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
/* TCP Illinois Parameters */
struct illinois {
u64 sum_rtt; /* sum of rtt's measured within last rtt */
u16 cnt_rtt; /* # of rtts measured within last rtt */
u32 base_rtt; /* min of all rtt in usec */
u32 max_rtt; /* max of all rtt in usec */
u32 end_seq; /* right edge of current RTT */
u32 alpha; /* Additive increase */
u32 beta; /* Muliplicative decrease */
u16 acked; /* # packets acked by current ACK */
u8 rtt_above; /* average rtt has gone above threshold */
u8 rtt_low; /* # of rtts measurements below threshold */
};
static void rtt_reset(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk);
ca->end_seq = tp->snd_nxt;
ca->cnt_rtt = 0;
ca->sum_rtt = 0;
/* TODO: age max_rtt? */
}
static void tcp_illinois_init(struct sock *sk)
{
struct illinois *ca = inet_csk_ca(sk);
ca->alpha = ALPHA_MAX;
ca->beta = BETA_BASE;
ca->base_rtt = 0x7fffffff;
ca->max_rtt = 0;
ca->acked = 0;
ca->rtt_low = 0;
ca->rtt_above = 0;
rtt_reset(sk);
}
/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
struct illinois *ca = inet_csk_ca(sk);
ca->acked = pkts_acked;
/* dup ack, no rtt sample */
if (rtt < 0)
return;
/* ignore bogus values, this prevents wraparound in alpha math */
if (rtt > RTT_MAX)
rtt = RTT_MAX;
/* keep track of minimum RTT seen so far */
if (ca->base_rtt > rtt)
ca->base_rtt = rtt;
/* and max */
if (ca->max_rtt < rtt)
ca->max_rtt = rtt;
++ca->cnt_rtt;
ca->sum_rtt += rtt;
}
/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
return ca->max_rtt - ca->base_rtt;
}
/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
u64 t = ca->sum_rtt;
do_div(t, ca->cnt_rtt);
return t - ca->base_rtt;
}
/*
* Compute value of alpha used for additive increase.
* If small window then use 1.0, equivalent to Reno.
*
* For larger windows, adjust based on average delay.
* A. If average delay is at minimum (we are uncongested),
* then use large alpha (10.0) to increase faster.
* B. If average delay is at maximum (getting congested)
* then use small alpha (0.3)
*
* The result is a convex window growth curve.
*/
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
u32 d1 = dm / 100; /* Low threshold */
if (da <= d1) {
/* If never got out of low delay zone, then use max */
if (!ca->rtt_above)
return ALPHA_MAX;
/* Wait for 5 good RTT's before allowing alpha to go alpha max.
* This prevents one good RTT from causing sudden window increase.
*/
if (++ca->rtt_low < theta)
return ca->alpha;
ca->rtt_low = 0;
ca->rtt_above = 0;
return ALPHA_MAX;
}
ca->rtt_above = 1;
/*
* Based on:
*
* (dm - d1) amin amax
* k1 = -------------------
* amax - amin
*
* (dm - d1) amin
* k2 = ---------------- - d1
* amax - amin
*
* k1
* alpha = ----------
* k2 + da
*/
dm -= d1;
da -= d1;
return (dm * ALPHA_MAX) /
(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}
/*
* Beta used for multiplicative decrease.
* For small window sizes returns same value as Reno (0.5)
*
* If delay is small (10% of max) then beta = 1/8
* If delay is up to 80% of max then beta = 1/2
* In between is a linear function
*/
static u32 beta(u32 da, u32 dm)
{
u32 d2, d3;
d2 = dm / 10;
if (da <= d2)
return BETA_MIN;
d3 = (8 * dm) / 10;
if (da >= d3 || d3 <= d2)
return BETA_MAX;
/*
* Based on:
*
* bmin d3 - bmax d2
* k3 = -------------------
* d3 - d2
*
* bmax - bmin
* k4 = -------------
* d3 - d2
*
* b = k3 + k4 da
*/
return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
/ (d3 - d2);
}
/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk);
if (tp->snd_cwnd < win_thresh) {
ca->alpha = ALPHA_BASE;
ca->beta = BETA_BASE;
} else if (ca->cnt_rtt > 0) {
u32 dm = max_delay(ca);
u32 da = avg_delay(ca);
ca->alpha = alpha(ca, da, dm);
ca->beta = beta(da, dm);
}
rtt_reset(sk);
}
/*
* In case of loss, reset to default values
*/
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
struct illinois *ca = inet_csk_ca(sk);
if (new_state == TCP_CA_Loss) {
ca->alpha = ALPHA_BASE;
ca->beta = BETA_BASE;
ca->rtt_low = 0;
ca->rtt_above = 0;
rtt_reset(sk);
}
}
/*
* Increase window in response to successful acknowledgment.
*/
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk);
if (after(ack, ca->end_seq))
update_params(sk);
/* RFC2861 only increase cwnd if fully utilized */
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
/* In slow start */
if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp);
else {
u32 delta;
/* snd_cwnd_cnt is # of packets since last cwnd increment */
tp->snd_cwnd_cnt += ca->acked;
ca->acked = 1;
/* This is close approximation of:
* tp->snd_cwnd += alpha/tp->snd_cwnd
*/
delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
if (delta >= tp->snd_cwnd) {
tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
(u32) tp->snd_cwnd_clamp);
tp->snd_cwnd_cnt = 0;
}
}
}
static u32 tcp_illinois_ssthresh(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk);
/* Multiplicative decrease */
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_illinois_info(struct sock *sk, u32 ext,
struct sk_buff *skb)
{
const struct illinois *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
struct tcpvegas_info info = {
.tcpv_enabled = 1,
.tcpv_rttcnt = ca->cnt_rtt,
.tcpv_minrtt = ca->base_rtt,
};
u64 t = ca->sum_rtt;
do_div(t, ca->cnt_rtt);
info.tcpv_rtt = t;
nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
}
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
.flags = TCP_CONG_RTT_STAMP,
.init = tcp_illinois_init,
.ssthresh = tcp_illinois_ssthresh,
.min_cwnd = tcp_reno_min_cwnd,
.cong_avoid = tcp_illinois_cong_avoid,
.set_state = tcp_illinois_state,
.get_info = tcp_illinois_info,
.pkts_acked = tcp_illinois_acked,
.owner = THIS_MODULE,
.name = "illinois",
};
static int __init tcp_illinois_register(void)
{
BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
return tcp_register_congestion_control(&tcp_illinois);
}
static void __exit tcp_illinois_unregister(void)
{
tcp_unregister_congestion_control(&tcp_illinois);
}
module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);
MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3770_0 |
crossvul-cpp_data_bad_4822_0 | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Rasmus Lerdorf <rasmus@php.net> |
| Marcus Boerger <helly@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/* ToDos
*
* See if example images from http://www.exif.org have illegal
* thumbnail sizes or if code is corrupt.
* Create/Update exif headers.
* Create/Remove/Update image thumbnails.
*/
/* Security
*
* At current time i do not see any security problems but a potential
* attacker could generate an image with recursive ifd pointers...(Marcus)
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "ext/standard/file.h"
#if HAVE_EXIF
/* When EXIF_DEBUG is defined the module generates a lot of debug messages
* that help understanding what is going on. This can and should be used
* while extending the module as it shows if you are at the right position.
* You are always considered to have a copy of TIFF6.0 and EXIF2.10 standard.
*/
#undef EXIF_DEBUG
#ifdef EXIF_DEBUG
#define EXIFERR_DC , const char *_file, size_t _line TSRMLS_DC
#define EXIFERR_CC , __FILE__, __LINE__ TSRMLS_CC
#else
#define EXIFERR_DC TSRMLS_DC
#define EXIFERR_CC TSRMLS_CC
#endif
#undef EXIF_JPEG2000
#include "php_exif.h"
#include <math.h>
#include "php_ini.h"
#include "ext/standard/php_string.h"
#include "ext/standard/php_image.h"
#include "ext/standard/info.h"
/* needed for ssize_t definition */
#include <sys/types.h>
typedef unsigned char uchar;
#ifndef safe_emalloc
# define safe_emalloc(a,b,c) emalloc((a)*(b)+(c))
#endif
#ifndef safe_erealloc
# define safe_erealloc(p,a,b,c) erealloc(p, (a)*(b)+(c))
#endif
#ifndef TRUE
# define TRUE 1
# define FALSE 0
#endif
#ifndef max
# define max(a,b) ((a)>(b) ? (a) : (b))
#endif
#define EFREE_IF(ptr) if (ptr) efree(ptr)
#define MAX_IFD_NESTING_LEVEL 100
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO(arginfo_exif_tagname, 0)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_read_data, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, sections_needed)
ZEND_ARG_INFO(0, sub_arrays)
ZEND_ARG_INFO(0, read_thumbnail)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_thumbnail, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(1, width)
ZEND_ARG_INFO(1, height)
ZEND_ARG_INFO(1, imagetype)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_exif_imagetype, 0)
ZEND_ARG_INFO(0, imagefile)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ exif_functions[]
*/
const zend_function_entry exif_functions[] = {
PHP_FE(exif_read_data, arginfo_exif_read_data)
PHP_FALIAS(read_exif_data, exif_read_data, arginfo_exif_read_data)
PHP_FE(exif_tagname, arginfo_exif_tagname)
PHP_FE(exif_thumbnail, arginfo_exif_thumbnail)
PHP_FE(exif_imagetype, arginfo_exif_imagetype)
PHP_FE_END
};
/* }}} */
#define EXIF_VERSION "1.4 $Id$"
/* {{{ PHP_MINFO_FUNCTION
*/
PHP_MINFO_FUNCTION(exif)
{
php_info_print_table_start();
php_info_print_table_row(2, "EXIF Support", "enabled");
php_info_print_table_row(2, "EXIF Version", EXIF_VERSION);
php_info_print_table_row(2, "Supported EXIF Version", "0220");
php_info_print_table_row(2, "Supported filetypes", "JPEG,TIFF");
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
/* }}} */
ZEND_BEGIN_MODULE_GLOBALS(exif)
char * encode_unicode;
char * decode_unicode_be;
char * decode_unicode_le;
char * encode_jis;
char * decode_jis_be;
char * decode_jis_le;
ZEND_END_MODULE_GLOBALS(exif)
ZEND_DECLARE_MODULE_GLOBALS(exif)
#ifdef ZTS
#define EXIF_G(v) TSRMG(exif_globals_id, zend_exif_globals *, v)
#else
#define EXIF_G(v) (exif_globals.v)
#endif
/* {{{ PHP_INI
*/
ZEND_INI_MH(OnUpdateEncode)
{
if (new_value && new_value_length) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
ZEND_INI_MH(OnUpdateDecode)
{
if (new_value) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
PHP_INI_BEGIN()
STD_PHP_INI_ENTRY("exif.encode_unicode", "ISO-8859-15", PHP_INI_ALL, OnUpdateEncode, encode_unicode, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_motorola", "UCS-2BE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_intel", "UCS-2LE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_le, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.encode_jis", "", PHP_INI_ALL, OnUpdateEncode, encode_jis, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_motorola", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_intel", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_le, zend_exif_globals, exif_globals)
PHP_INI_END()
/* }}} */
/* {{{ PHP_GINIT_FUNCTION
*/
static PHP_GINIT_FUNCTION(exif)
{
exif_globals->encode_unicode = NULL;
exif_globals->decode_unicode_be = NULL;
exif_globals->decode_unicode_le = NULL;
exif_globals->encode_jis = NULL;
exif_globals->decode_jis_be = NULL;
exif_globals->decode_jis_le = NULL;
}
/* }}} */
/* {{{ PHP_MINIT_FUNCTION(exif)
Get the size of an image as 4-element array */
PHP_MINIT_FUNCTION(exif)
{
REGISTER_INI_ENTRIES();
if (zend_hash_exists(&module_registry, "mbstring", sizeof("mbstring"))) {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 1, CONST_CS | CONST_PERSISTENT);
} else {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 0, CONST_CS | CONST_PERSISTENT);
}
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MSHUTDOWN_FUNCTION
*/
PHP_MSHUTDOWN_FUNCTION(exif)
{
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
/* }}} */
/* {{{ exif dependencies */
static const zend_module_dep exif_module_deps[] = {
ZEND_MOD_REQUIRED("standard")
ZEND_MOD_OPTIONAL("mbstring")
ZEND_MOD_END
};
/* }}} */
/* {{{ exif_module_entry
*/
zend_module_entry exif_module_entry = {
STANDARD_MODULE_HEADER_EX, NULL,
exif_module_deps,
"exif",
exif_functions,
PHP_MINIT(exif),
PHP_MSHUTDOWN(exif),
NULL, NULL,
PHP_MINFO(exif),
#if ZEND_MODULE_API_NO >= 20010901
EXIF_VERSION,
#endif
#if ZEND_MODULE_API_NO >= 20060613
PHP_MODULE_GLOBALS(exif),
PHP_GINIT(exif),
NULL,
NULL,
STANDARD_MODULE_PROPERTIES_EX
#else
STANDARD_MODULE_PROPERTIES
#endif
};
/* }}} */
#ifdef COMPILE_DL_EXIF
ZEND_GET_MODULE(exif)
#endif
/* {{{ php_strnlen
* get length of string if buffer if less than buffer size or buffer size */
static size_t php_strnlen(char* str, size_t maxlen) {
size_t len = 0;
if (str && maxlen && *str) {
do {
len++;
} while (--maxlen && *(++str));
}
return len;
}
/* }}} */
/* {{{ error messages
*/
static const char * EXIF_ERROR_FILEEOF = "Unexpected end of file reached";
static const char * EXIF_ERROR_CORRUPT = "File structure corrupted";
static const char * EXIF_ERROR_THUMBEOF = "Thumbnail goes IFD boundary or end of file reached";
static const char * EXIF_ERROR_FSREALLOC = "Illegal reallocating of undefined file section";
#define EXIF_ERRLOG_FILEEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FILEEOF);
#define EXIF_ERRLOG_CORRUPT(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_CORRUPT);
#define EXIF_ERRLOG_THUMBEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_THUMBEOF);
#define EXIF_ERRLOG_FSREALLOC(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FSREALLOC);
/* }}} */
/* {{{ format description defines
Describes format descriptor
*/
static int php_tiff_bytes_per_format[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8, 1};
#define NUM_FORMATS 13
#define TAG_FMT_BYTE 1
#define TAG_FMT_STRING 2
#define TAG_FMT_USHORT 3
#define TAG_FMT_ULONG 4
#define TAG_FMT_URATIONAL 5
#define TAG_FMT_SBYTE 6
#define TAG_FMT_UNDEFINED 7
#define TAG_FMT_SSHORT 8
#define TAG_FMT_SLONG 9
#define TAG_FMT_SRATIONAL 10
#define TAG_FMT_SINGLE 11
#define TAG_FMT_DOUBLE 12
#define TAG_FMT_IFD 13
#ifdef EXIF_DEBUG
static char *exif_get_tagformat(int format)
{
switch(format) {
case TAG_FMT_BYTE: return "BYTE";
case TAG_FMT_STRING: return "STRING";
case TAG_FMT_USHORT: return "USHORT";
case TAG_FMT_ULONG: return "ULONG";
case TAG_FMT_URATIONAL: return "URATIONAL";
case TAG_FMT_SBYTE: return "SBYTE";
case TAG_FMT_UNDEFINED: return "UNDEFINED";
case TAG_FMT_SSHORT: return "SSHORT";
case TAG_FMT_SLONG: return "SLONG";
case TAG_FMT_SRATIONAL: return "SRATIONAL";
case TAG_FMT_SINGLE: return "SINGLE";
case TAG_FMT_DOUBLE: return "DOUBLE";
case TAG_FMT_IFD: return "IFD";
}
return "*Illegal";
}
#endif
/* Describes tag values */
#define TAG_GPS_VERSION_ID 0x0000
#define TAG_GPS_LATITUDE_REF 0x0001
#define TAG_GPS_LATITUDE 0x0002
#define TAG_GPS_LONGITUDE_REF 0x0003
#define TAG_GPS_LONGITUDE 0x0004
#define TAG_GPS_ALTITUDE_REF 0x0005
#define TAG_GPS_ALTITUDE 0x0006
#define TAG_GPS_TIME_STAMP 0x0007
#define TAG_GPS_SATELLITES 0x0008
#define TAG_GPS_STATUS 0x0009
#define TAG_GPS_MEASURE_MODE 0x000A
#define TAG_GPS_DOP 0x000B
#define TAG_GPS_SPEED_REF 0x000C
#define TAG_GPS_SPEED 0x000D
#define TAG_GPS_TRACK_REF 0x000E
#define TAG_GPS_TRACK 0x000F
#define TAG_GPS_IMG_DIRECTION_REF 0x0010
#define TAG_GPS_IMG_DIRECTION 0x0011
#define TAG_GPS_MAP_DATUM 0x0012
#define TAG_GPS_DEST_LATITUDE_REF 0x0013
#define TAG_GPS_DEST_LATITUDE 0x0014
#define TAG_GPS_DEST_LONGITUDE_REF 0x0015
#define TAG_GPS_DEST_LONGITUDE 0x0016
#define TAG_GPS_DEST_BEARING_REF 0x0017
#define TAG_GPS_DEST_BEARING 0x0018
#define TAG_GPS_DEST_DISTANCE_REF 0x0019
#define TAG_GPS_DEST_DISTANCE 0x001A
#define TAG_GPS_PROCESSING_METHOD 0x001B
#define TAG_GPS_AREA_INFORMATION 0x001C
#define TAG_GPS_DATE_STAMP 0x001D
#define TAG_GPS_DIFFERENTIAL 0x001E
#define TAG_TIFF_COMMENT 0x00FE /* SHOUDLNT HAPPEN */
#define TAG_NEW_SUBFILE 0x00FE /* New version of subfile tag */
#define TAG_SUBFILE_TYPE 0x00FF /* Old version of subfile tag */
#define TAG_IMAGEWIDTH 0x0100
#define TAG_IMAGEHEIGHT 0x0101
#define TAG_BITS_PER_SAMPLE 0x0102
#define TAG_COMPRESSION 0x0103
#define TAG_PHOTOMETRIC_INTERPRETATION 0x0106
#define TAG_TRESHHOLDING 0x0107
#define TAG_CELL_WIDTH 0x0108
#define TAG_CELL_HEIGHT 0x0109
#define TAG_FILL_ORDER 0x010A
#define TAG_DOCUMENT_NAME 0x010D
#define TAG_IMAGE_DESCRIPTION 0x010E
#define TAG_MAKE 0x010F
#define TAG_MODEL 0x0110
#define TAG_STRIP_OFFSETS 0x0111
#define TAG_ORIENTATION 0x0112
#define TAG_SAMPLES_PER_PIXEL 0x0115
#define TAG_ROWS_PER_STRIP 0x0116
#define TAG_STRIP_BYTE_COUNTS 0x0117
#define TAG_MIN_SAMPPLE_VALUE 0x0118
#define TAG_MAX_SAMPLE_VALUE 0x0119
#define TAG_X_RESOLUTION 0x011A
#define TAG_Y_RESOLUTION 0x011B
#define TAG_PLANAR_CONFIGURATION 0x011C
#define TAG_PAGE_NAME 0x011D
#define TAG_X_POSITION 0x011E
#define TAG_Y_POSITION 0x011F
#define TAG_FREE_OFFSETS 0x0120
#define TAG_FREE_BYTE_COUNTS 0x0121
#define TAG_GRAY_RESPONSE_UNIT 0x0122
#define TAG_GRAY_RESPONSE_CURVE 0x0123
#define TAG_RESOLUTION_UNIT 0x0128
#define TAG_PAGE_NUMBER 0x0129
#define TAG_TRANSFER_FUNCTION 0x012D
#define TAG_SOFTWARE 0x0131
#define TAG_DATETIME 0x0132
#define TAG_ARTIST 0x013B
#define TAG_HOST_COMPUTER 0x013C
#define TAG_PREDICTOR 0x013D
#define TAG_WHITE_POINT 0x013E
#define TAG_PRIMARY_CHROMATICITIES 0x013F
#define TAG_COLOR_MAP 0x0140
#define TAG_HALFTONE_HINTS 0x0141
#define TAG_TILE_WIDTH 0x0142
#define TAG_TILE_LENGTH 0x0143
#define TAG_TILE_OFFSETS 0x0144
#define TAG_TILE_BYTE_COUNTS 0x0145
#define TAG_SUB_IFD 0x014A
#define TAG_INK_SETMPUTER 0x014C
#define TAG_INK_NAMES 0x014D
#define TAG_NUMBER_OF_INKS 0x014E
#define TAG_DOT_RANGE 0x0150
#define TAG_TARGET_PRINTER 0x0151
#define TAG_EXTRA_SAMPLE 0x0152
#define TAG_SAMPLE_FORMAT 0x0153
#define TAG_S_MIN_SAMPLE_VALUE 0x0154
#define TAG_S_MAX_SAMPLE_VALUE 0x0155
#define TAG_TRANSFER_RANGE 0x0156
#define TAG_JPEG_TABLES 0x015B
#define TAG_JPEG_PROC 0x0200
#define TAG_JPEG_INTERCHANGE_FORMAT 0x0201
#define TAG_JPEG_INTERCHANGE_FORMAT_LEN 0x0202
#define TAG_JPEG_RESTART_INTERVAL 0x0203
#define TAG_JPEG_LOSSLESS_PREDICTOR 0x0205
#define TAG_JPEG_POINT_TRANSFORMS 0x0206
#define TAG_JPEG_Q_TABLES 0x0207
#define TAG_JPEG_DC_TABLES 0x0208
#define TAG_JPEG_AC_TABLES 0x0209
#define TAG_YCC_COEFFICIENTS 0x0211
#define TAG_YCC_SUB_SAMPLING 0x0212
#define TAG_YCC_POSITIONING 0x0213
#define TAG_REFERENCE_BLACK_WHITE 0x0214
/* 0x0301 - 0x0302 */
/* 0x0320 */
/* 0x0343 */
/* 0x5001 - 0x501B */
/* 0x5021 - 0x503B */
/* 0x5090 - 0x5091 */
/* 0x5100 - 0x5101 */
/* 0x5110 - 0x5113 */
/* 0x80E3 - 0x80E6 */
/* 0x828d - 0x828F */
#define TAG_COPYRIGHT 0x8298
#define TAG_EXPOSURETIME 0x829A
#define TAG_FNUMBER 0x829D
#define TAG_EXIF_IFD_POINTER 0x8769
#define TAG_ICC_PROFILE 0x8773
#define TAG_EXPOSURE_PROGRAM 0x8822
#define TAG_SPECTRAL_SENSITY 0x8824
#define TAG_GPS_IFD_POINTER 0x8825
#define TAG_ISOSPEED 0x8827
#define TAG_OPTOELECTRIC_CONVERSION_F 0x8828
/* 0x8829 - 0x882b */
#define TAG_EXIFVERSION 0x9000
#define TAG_DATE_TIME_ORIGINAL 0x9003
#define TAG_DATE_TIME_DIGITIZED 0x9004
#define TAG_COMPONENT_CONFIG 0x9101
#define TAG_COMPRESSED_BITS_PER_PIXEL 0x9102
#define TAG_SHUTTERSPEED 0x9201
#define TAG_APERTURE 0x9202
#define TAG_BRIGHTNESS_VALUE 0x9203
#define TAG_EXPOSURE_BIAS_VALUE 0x9204
#define TAG_MAX_APERTURE 0x9205
#define TAG_SUBJECT_DISTANCE 0x9206
#define TAG_METRIC_MODULE 0x9207
#define TAG_LIGHT_SOURCE 0x9208
#define TAG_FLASH 0x9209
#define TAG_FOCAL_LENGTH 0x920A
/* 0x920B - 0x920D */
/* 0x9211 - 0x9216 */
#define TAG_SUBJECT_AREA 0x9214
#define TAG_MAKER_NOTE 0x927C
#define TAG_USERCOMMENT 0x9286
#define TAG_SUB_SEC_TIME 0x9290
#define TAG_SUB_SEC_TIME_ORIGINAL 0x9291
#define TAG_SUB_SEC_TIME_DIGITIZED 0x9292
/* 0x923F */
/* 0x935C */
#define TAG_XP_TITLE 0x9C9B
#define TAG_XP_COMMENTS 0x9C9C
#define TAG_XP_AUTHOR 0x9C9D
#define TAG_XP_KEYWORDS 0x9C9E
#define TAG_XP_SUBJECT 0x9C9F
#define TAG_FLASH_PIX_VERSION 0xA000
#define TAG_COLOR_SPACE 0xA001
#define TAG_COMP_IMAGE_WIDTH 0xA002 /* compressed images only */
#define TAG_COMP_IMAGE_HEIGHT 0xA003
#define TAG_RELATED_SOUND_FILE 0xA004
#define TAG_INTEROP_IFD_POINTER 0xA005 /* IFD pointer */
#define TAG_FLASH_ENERGY 0xA20B
#define TAG_SPATIAL_FREQUENCY_RESPONSE 0xA20C
#define TAG_FOCALPLANE_X_RES 0xA20E
#define TAG_FOCALPLANE_Y_RES 0xA20F
#define TAG_FOCALPLANE_RESOLUTION_UNIT 0xA210
#define TAG_SUBJECT_LOCATION 0xA214
#define TAG_EXPOSURE_INDEX 0xA215
#define TAG_SENSING_METHOD 0xA217
#define TAG_FILE_SOURCE 0xA300
#define TAG_SCENE_TYPE 0xA301
#define TAG_CFA_PATTERN 0xA302
#define TAG_CUSTOM_RENDERED 0xA401
#define TAG_EXPOSURE_MODE 0xA402
#define TAG_WHITE_BALANCE 0xA403
#define TAG_DIGITAL_ZOOM_RATIO 0xA404
#define TAG_FOCAL_LENGTH_IN_35_MM_FILM 0xA405
#define TAG_SCENE_CAPTURE_TYPE 0xA406
#define TAG_GAIN_CONTROL 0xA407
#define TAG_CONTRAST 0xA408
#define TAG_SATURATION 0xA409
#define TAG_SHARPNESS 0xA40A
#define TAG_DEVICE_SETTING_DESCRIPTION 0xA40B
#define TAG_SUBJECT_DISTANCE_RANGE 0xA40C
#define TAG_IMAGE_UNIQUE_ID 0xA420
/* Olympus specific tags */
#define TAG_OLYMPUS_SPECIALMODE 0x0200
#define TAG_OLYMPUS_JPEGQUAL 0x0201
#define TAG_OLYMPUS_MACRO 0x0202
#define TAG_OLYMPUS_DIGIZOOM 0x0204
#define TAG_OLYMPUS_SOFTWARERELEASE 0x0207
#define TAG_OLYMPUS_PICTINFO 0x0208
#define TAG_OLYMPUS_CAMERAID 0x0209
/* end Olympus specific tags */
/* Internal */
#define TAG_NONE -1 /* note that -1 <> 0xFFFF */
#define TAG_COMPUTED_VALUE -2
#define TAG_END_OF_LIST 0xFFFD
/* Values for TAG_PHOTOMETRIC_INTERPRETATION */
#define PMI_BLACK_IS_ZERO 0
#define PMI_WHITE_IS_ZERO 1
#define PMI_RGB 2
#define PMI_PALETTE_COLOR 3
#define PMI_TRANSPARENCY_MASK 4
#define PMI_SEPARATED 5
#define PMI_YCBCR 6
#define PMI_CIELAB 8
/* }}} */
/* {{{ TabTable[]
*/
typedef const struct {
unsigned short Tag;
char *Desc;
} tag_info_type;
typedef tag_info_type tag_info_array[];
typedef tag_info_type *tag_table_type;
#define TAG_TABLE_END \
{TAG_NONE, "No tag value"},\
{TAG_COMPUTED_VALUE, "Computed value"},\
{TAG_END_OF_LIST, ""} /* Important for exif_get_tagname() IF value != "" function result is != false */
static tag_info_array tag_table_IFD = {
{ 0x000B, "ACDComment"},
{ 0x00FE, "NewSubFile"}, /* better name it 'ImageType' ? */
{ 0x00FF, "SubFile"},
{ 0x0100, "ImageWidth"},
{ 0x0101, "ImageLength"},
{ 0x0102, "BitsPerSample"},
{ 0x0103, "Compression"},
{ 0x0106, "PhotometricInterpretation"},
{ 0x010A, "FillOrder"},
{ 0x010D, "DocumentName"},
{ 0x010E, "ImageDescription"},
{ 0x010F, "Make"},
{ 0x0110, "Model"},
{ 0x0111, "StripOffsets"},
{ 0x0112, "Orientation"},
{ 0x0115, "SamplesPerPixel"},
{ 0x0116, "RowsPerStrip"},
{ 0x0117, "StripByteCounts"},
{ 0x0118, "MinSampleValue"},
{ 0x0119, "MaxSampleValue"},
{ 0x011A, "XResolution"},
{ 0x011B, "YResolution"},
{ 0x011C, "PlanarConfiguration"},
{ 0x011D, "PageName"},
{ 0x011E, "XPosition"},
{ 0x011F, "YPosition"},
{ 0x0120, "FreeOffsets"},
{ 0x0121, "FreeByteCounts"},
{ 0x0122, "GrayResponseUnit"},
{ 0x0123, "GrayResponseCurve"},
{ 0x0124, "T4Options"},
{ 0x0125, "T6Options"},
{ 0x0128, "ResolutionUnit"},
{ 0x0129, "PageNumber"},
{ 0x012D, "TransferFunction"},
{ 0x0131, "Software"},
{ 0x0132, "DateTime"},
{ 0x013B, "Artist"},
{ 0x013C, "HostComputer"},
{ 0x013D, "Predictor"},
{ 0x013E, "WhitePoint"},
{ 0x013F, "PrimaryChromaticities"},
{ 0x0140, "ColorMap"},
{ 0x0141, "HalfToneHints"},
{ 0x0142, "TileWidth"},
{ 0x0143, "TileLength"},
{ 0x0144, "TileOffsets"},
{ 0x0145, "TileByteCounts"},
{ 0x014A, "SubIFD"},
{ 0x014C, "InkSet"},
{ 0x014D, "InkNames"},
{ 0x014E, "NumberOfInks"},
{ 0x0150, "DotRange"},
{ 0x0151, "TargetPrinter"},
{ 0x0152, "ExtraSample"},
{ 0x0153, "SampleFormat"},
{ 0x0154, "SMinSampleValue"},
{ 0x0155, "SMaxSampleValue"},
{ 0x0156, "TransferRange"},
{ 0x0157, "ClipPath"},
{ 0x0158, "XClipPathUnits"},
{ 0x0159, "YClipPathUnits"},
{ 0x015A, "Indexed"},
{ 0x015B, "JPEGTables"},
{ 0x015F, "OPIProxy"},
{ 0x0200, "JPEGProc"},
{ 0x0201, "JPEGInterchangeFormat"},
{ 0x0202, "JPEGInterchangeFormatLength"},
{ 0x0203, "JPEGRestartInterval"},
{ 0x0205, "JPEGLosslessPredictors"},
{ 0x0206, "JPEGPointTransforms"},
{ 0x0207, "JPEGQTables"},
{ 0x0208, "JPEGDCTables"},
{ 0x0209, "JPEGACTables"},
{ 0x0211, "YCbCrCoefficients"},
{ 0x0212, "YCbCrSubSampling"},
{ 0x0213, "YCbCrPositioning"},
{ 0x0214, "ReferenceBlackWhite"},
{ 0x02BC, "ExtensibleMetadataPlatform"}, /* XAP: Extensible Authoring Publishing, obsoleted by XMP: Extensible Metadata Platform */
{ 0x0301, "Gamma"},
{ 0x0302, "ICCProfileDescriptor"},
{ 0x0303, "SRGBRenderingIntent"},
{ 0x0320, "ImageTitle"},
{ 0x5001, "ResolutionXUnit"},
{ 0x5002, "ResolutionYUnit"},
{ 0x5003, "ResolutionXLengthUnit"},
{ 0x5004, "ResolutionYLengthUnit"},
{ 0x5005, "PrintFlags"},
{ 0x5006, "PrintFlagsVersion"},
{ 0x5007, "PrintFlagsCrop"},
{ 0x5008, "PrintFlagsBleedWidth"},
{ 0x5009, "PrintFlagsBleedWidthScale"},
{ 0x500A, "HalftoneLPI"},
{ 0x500B, "HalftoneLPIUnit"},
{ 0x500C, "HalftoneDegree"},
{ 0x500D, "HalftoneShape"},
{ 0x500E, "HalftoneMisc"},
{ 0x500F, "HalftoneScreen"},
{ 0x5010, "JPEGQuality"},
{ 0x5011, "GridSize"},
{ 0x5012, "ThumbnailFormat"},
{ 0x5013, "ThumbnailWidth"},
{ 0x5014, "ThumbnailHeight"},
{ 0x5015, "ThumbnailColorDepth"},
{ 0x5016, "ThumbnailPlanes"},
{ 0x5017, "ThumbnailRawBytes"},
{ 0x5018, "ThumbnailSize"},
{ 0x5019, "ThumbnailCompressedSize"},
{ 0x501A, "ColorTransferFunction"},
{ 0x501B, "ThumbnailData"},
{ 0x5020, "ThumbnailImageWidth"},
{ 0x5021, "ThumbnailImageHeight"},
{ 0x5022, "ThumbnailBitsPerSample"},
{ 0x5023, "ThumbnailCompression"},
{ 0x5024, "ThumbnailPhotometricInterp"},
{ 0x5025, "ThumbnailImageDescription"},
{ 0x5026, "ThumbnailEquipMake"},
{ 0x5027, "ThumbnailEquipModel"},
{ 0x5028, "ThumbnailStripOffsets"},
{ 0x5029, "ThumbnailOrientation"},
{ 0x502A, "ThumbnailSamplesPerPixel"},
{ 0x502B, "ThumbnailRowsPerStrip"},
{ 0x502C, "ThumbnailStripBytesCount"},
{ 0x502D, "ThumbnailResolutionX"},
{ 0x502E, "ThumbnailResolutionY"},
{ 0x502F, "ThumbnailPlanarConfig"},
{ 0x5030, "ThumbnailResolutionUnit"},
{ 0x5031, "ThumbnailTransferFunction"},
{ 0x5032, "ThumbnailSoftwareUsed"},
{ 0x5033, "ThumbnailDateTime"},
{ 0x5034, "ThumbnailArtist"},
{ 0x5035, "ThumbnailWhitePoint"},
{ 0x5036, "ThumbnailPrimaryChromaticities"},
{ 0x5037, "ThumbnailYCbCrCoefficients"},
{ 0x5038, "ThumbnailYCbCrSubsampling"},
{ 0x5039, "ThumbnailYCbCrPositioning"},
{ 0x503A, "ThumbnailRefBlackWhite"},
{ 0x503B, "ThumbnailCopyRight"},
{ 0x5090, "LuminanceTable"},
{ 0x5091, "ChrominanceTable"},
{ 0x5100, "FrameDelay"},
{ 0x5101, "LoopCount"},
{ 0x5110, "PixelUnit"},
{ 0x5111, "PixelPerUnitX"},
{ 0x5112, "PixelPerUnitY"},
{ 0x5113, "PaletteHistogram"},
{ 0x1000, "RelatedImageFileFormat"},
{ 0x800D, "ImageID"},
{ 0x80E3, "Matteing"}, /* obsoleted by ExtraSamples */
{ 0x80E4, "DataType"}, /* obsoleted by SampleFormat */
{ 0x80E5, "ImageDepth"},
{ 0x80E6, "TileDepth"},
{ 0x828D, "CFARepeatPatternDim"},
{ 0x828E, "CFAPattern"},
{ 0x828F, "BatteryLevel"},
{ 0x8298, "Copyright"},
{ 0x829A, "ExposureTime"},
{ 0x829D, "FNumber"},
{ 0x83BB, "IPTC/NAA"},
{ 0x84E3, "IT8RasterPadding"},
{ 0x84E5, "IT8ColorTable"},
{ 0x8649, "ImageResourceInformation"}, /* PhotoShop */
{ 0x8769, "Exif_IFD_Pointer"},
{ 0x8773, "ICC_Profile"},
{ 0x8822, "ExposureProgram"},
{ 0x8824, "SpectralSensity"},
{ 0x8828, "OECF"},
{ 0x8825, "GPS_IFD_Pointer"},
{ 0x8827, "ISOSpeedRatings"},
{ 0x8828, "OECF"},
{ 0x9000, "ExifVersion"},
{ 0x9003, "DateTimeOriginal"},
{ 0x9004, "DateTimeDigitized"},
{ 0x9101, "ComponentsConfiguration"},
{ 0x9102, "CompressedBitsPerPixel"},
{ 0x9201, "ShutterSpeedValue"},
{ 0x9202, "ApertureValue"},
{ 0x9203, "BrightnessValue"},
{ 0x9204, "ExposureBiasValue"},
{ 0x9205, "MaxApertureValue"},
{ 0x9206, "SubjectDistance"},
{ 0x9207, "MeteringMode"},
{ 0x9208, "LightSource"},
{ 0x9209, "Flash"},
{ 0x920A, "FocalLength"},
{ 0x920B, "FlashEnergy"}, /* 0xA20B in JPEG */
{ 0x920C, "SpatialFrequencyResponse"}, /* 0xA20C - - */
{ 0x920D, "Noise"},
{ 0x920E, "FocalPlaneXResolution"}, /* 0xA20E - - */
{ 0x920F, "FocalPlaneYResolution"}, /* 0xA20F - - */
{ 0x9210, "FocalPlaneResolutionUnit"}, /* 0xA210 - - */
{ 0x9211, "ImageNumber"},
{ 0x9212, "SecurityClassification"},
{ 0x9213, "ImageHistory"},
{ 0x9214, "SubjectLocation"}, /* 0xA214 - - */
{ 0x9215, "ExposureIndex"}, /* 0xA215 - - */
{ 0x9216, "TIFF/EPStandardID"},
{ 0x9217, "SensingMethod"}, /* 0xA217 - - */
{ 0x923F, "StoNits"},
{ 0x927C, "MakerNote"},
{ 0x9286, "UserComment"},
{ 0x9290, "SubSecTime"},
{ 0x9291, "SubSecTimeOriginal"},
{ 0x9292, "SubSecTimeDigitized"},
{ 0x935C, "ImageSourceData"}, /* "Adobe Photoshop Document Data Block": 8BIM... */
{ 0x9c9b, "Title" }, /* Win XP specific, Unicode */
{ 0x9c9c, "Comments" }, /* Win XP specific, Unicode */
{ 0x9c9d, "Author" }, /* Win XP specific, Unicode */
{ 0x9c9e, "Keywords" }, /* Win XP specific, Unicode */
{ 0x9c9f, "Subject" }, /* Win XP specific, Unicode, not to be confused with SubjectDistance and SubjectLocation */
{ 0xA000, "FlashPixVersion"},
{ 0xA001, "ColorSpace"},
{ 0xA002, "ExifImageWidth"},
{ 0xA003, "ExifImageLength"},
{ 0xA004, "RelatedSoundFile"},
{ 0xA005, "InteroperabilityOffset"},
{ 0xA20B, "FlashEnergy"}, /* 0x920B in TIFF/EP */
{ 0xA20C, "SpatialFrequencyResponse"}, /* 0x920C - - */
{ 0xA20D, "Noise"},
{ 0xA20E, "FocalPlaneXResolution"}, /* 0x920E - - */
{ 0xA20F, "FocalPlaneYResolution"}, /* 0x920F - - */
{ 0xA210, "FocalPlaneResolutionUnit"}, /* 0x9210 - - */
{ 0xA211, "ImageNumber"},
{ 0xA212, "SecurityClassification"},
{ 0xA213, "ImageHistory"},
{ 0xA214, "SubjectLocation"}, /* 0x9214 - - */
{ 0xA215, "ExposureIndex"}, /* 0x9215 - - */
{ 0xA216, "TIFF/EPStandardID"},
{ 0xA217, "SensingMethod"}, /* 0x9217 - - */
{ 0xA300, "FileSource"},
{ 0xA301, "SceneType"},
{ 0xA302, "CFAPattern"},
{ 0xA401, "CustomRendered"},
{ 0xA402, "ExposureMode"},
{ 0xA403, "WhiteBalance"},
{ 0xA404, "DigitalZoomRatio"},
{ 0xA405, "FocalLengthIn35mmFilm"},
{ 0xA406, "SceneCaptureType"},
{ 0xA407, "GainControl"},
{ 0xA408, "Contrast"},
{ 0xA409, "Saturation"},
{ 0xA40A, "Sharpness"},
{ 0xA40B, "DeviceSettingDescription"},
{ 0xA40C, "SubjectDistanceRange"},
{ 0xA420, "ImageUniqueID"},
TAG_TABLE_END
} ;
static tag_info_array tag_table_GPS = {
{ 0x0000, "GPSVersion"},
{ 0x0001, "GPSLatitudeRef"},
{ 0x0002, "GPSLatitude"},
{ 0x0003, "GPSLongitudeRef"},
{ 0x0004, "GPSLongitude"},
{ 0x0005, "GPSAltitudeRef"},
{ 0x0006, "GPSAltitude"},
{ 0x0007, "GPSTimeStamp"},
{ 0x0008, "GPSSatellites"},
{ 0x0009, "GPSStatus"},
{ 0x000A, "GPSMeasureMode"},
{ 0x000B, "GPSDOP"},
{ 0x000C, "GPSSpeedRef"},
{ 0x000D, "GPSSpeed"},
{ 0x000E, "GPSTrackRef"},
{ 0x000F, "GPSTrack"},
{ 0x0010, "GPSImgDirectionRef"},
{ 0x0011, "GPSImgDirection"},
{ 0x0012, "GPSMapDatum"},
{ 0x0013, "GPSDestLatitudeRef"},
{ 0x0014, "GPSDestLatitude"},
{ 0x0015, "GPSDestLongitudeRef"},
{ 0x0016, "GPSDestLongitude"},
{ 0x0017, "GPSDestBearingRef"},
{ 0x0018, "GPSDestBearing"},
{ 0x0019, "GPSDestDistanceRef"},
{ 0x001A, "GPSDestDistance"},
{ 0x001B, "GPSProcessingMode"},
{ 0x001C, "GPSAreaInformation"},
{ 0x001D, "GPSDateStamp"},
{ 0x001E, "GPSDifferential"},
TAG_TABLE_END
};
static tag_info_array tag_table_IOP = {
{ 0x0001, "InterOperabilityIndex"}, /* should be 'R98' or 'THM' */
{ 0x0002, "InterOperabilityVersion"},
{ 0x1000, "RelatedFileFormat"},
{ 0x1001, "RelatedImageWidth"},
{ 0x1002, "RelatedImageHeight"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CANON = {
{ 0x0001, "ModeArray"}, /* guess */
{ 0x0004, "ImageInfo"}, /* guess */
{ 0x0006, "ImageType"},
{ 0x0007, "FirmwareVersion"},
{ 0x0008, "ImageNumber"},
{ 0x0009, "OwnerName"},
{ 0x000C, "Camera"},
{ 0x000F, "CustomFunctions"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CASIO = {
{ 0x0001, "RecordingMode"},
{ 0x0002, "Quality"},
{ 0x0003, "FocusingMode"},
{ 0x0004, "FlashMode"},
{ 0x0005, "FlashIntensity"},
{ 0x0006, "ObjectDistance"},
{ 0x0007, "WhiteBalance"},
{ 0x000A, "DigitalZoom"},
{ 0x000B, "Sharpness"},
{ 0x000C, "Contrast"},
{ 0x000D, "Saturation"},
{ 0x0014, "CCDSensitivity"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_FUJI = {
{ 0x0000, "Version"},
{ 0x1000, "Quality"},
{ 0x1001, "Sharpness"},
{ 0x1002, "WhiteBalance"},
{ 0x1003, "Color"},
{ 0x1004, "Tone"},
{ 0x1010, "FlashMode"},
{ 0x1011, "FlashStrength"},
{ 0x1020, "Macro"},
{ 0x1021, "FocusMode"},
{ 0x1030, "SlowSync"},
{ 0x1031, "PictureMode"},
{ 0x1100, "ContTake"},
{ 0x1300, "BlurWarning"},
{ 0x1301, "FocusWarning"},
{ 0x1302, "AEWarning "},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON = {
{ 0x0003, "Quality"},
{ 0x0004, "ColorMode"},
{ 0x0005, "ImageAdjustment"},
{ 0x0006, "CCDSensitivity"},
{ 0x0007, "WhiteBalance"},
{ 0x0008, "Focus"},
{ 0x000a, "DigitalZoom"},
{ 0x000b, "Converter"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON_990 = {
{ 0x0001, "Version"},
{ 0x0002, "ISOSetting"},
{ 0x0003, "ColorMode"},
{ 0x0004, "Quality"},
{ 0x0005, "WhiteBalance"},
{ 0x0006, "ImageSharpening"},
{ 0x0007, "FocusMode"},
{ 0x0008, "FlashSetting"},
{ 0x000F, "ISOSelection"},
{ 0x0080, "ImageAdjustment"},
{ 0x0082, "AuxiliaryLens"},
{ 0x0085, "ManualFocusDistance"},
{ 0x0086, "DigitalZoom"},
{ 0x0088, "AFFocusPosition"},
{ 0x0010, "DataDump"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_OLYMPUS = {
{ 0x0200, "SpecialMode"},
{ 0x0201, "JPEGQuality"},
{ 0x0202, "Macro"},
{ 0x0204, "DigitalZoom"},
{ 0x0207, "SoftwareRelease"},
{ 0x0208, "PictureInfo"},
{ 0x0209, "CameraId"},
{ 0x0F00, "DataDump"},
TAG_TABLE_END
};
typedef enum mn_byte_order_t {
MN_ORDER_INTEL = 0,
MN_ORDER_MOTOROLA = 1,
MN_ORDER_NORMAL
} mn_byte_order_t;
typedef enum mn_offset_mode_t {
MN_OFFSET_NORMAL,
MN_OFFSET_MAKER,
MN_OFFSET_GUESS
} mn_offset_mode_t;
typedef struct {
tag_table_type tag_table;
char * make;
char * model;
char * id_string;
int id_string_len;
int offset;
mn_byte_order_t byte_order;
mn_offset_mode_t offset_mode;
} maker_note_type;
static const maker_note_type maker_note_array[] = {
{ tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_INTEL, MN_OFFSET_GUESS},
/* { tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},*/
{ tag_table_VND_CASIO, "CASIO", NULL, NULL, 0, 0, MN_ORDER_MOTOROLA, MN_OFFSET_NORMAL},
{ tag_table_VND_FUJI, "FUJIFILM", NULL, "FUJIFILM\x0C\x00\x00\x00", 12, 12, MN_ORDER_INTEL, MN_OFFSET_MAKER},
{ tag_table_VND_NIKON, "NIKON", NULL, "Nikon\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_NIKON_990, "NIKON", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_OLYMPUS, "OLYMPUS OPTICAL CO.,LTD", NULL, "OLYMP\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
};
/* }}} */
/* {{{ exif_get_tagname
Get headername for tag_num or NULL if not defined */
static char * exif_get_tagname(int tag_num, char *ret, int len, tag_table_type tag_table TSRMLS_DC)
{
int i, t;
char tmp[32];
for (i = 0; (t = tag_table[i].Tag) != TAG_END_OF_LIST; i++) {
if (t == tag_num) {
if (ret && len) {
strlcpy(ret, tag_table[i].Desc, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return tag_table[i].Desc;
}
}
if (ret && len) {
snprintf(tmp, sizeof(tmp), "UndefinedTag:0x%04X", tag_num);
strlcpy(ret, tmp, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return "";
}
/* }}} */
/* {{{ exif_char_dump
* Do not use! This is a debug function... */
#ifdef EXIF_DEBUG
static unsigned char* exif_char_dump(unsigned char * addr, int len, int offset)
{
static unsigned char buf[4096+1];
static unsigned char tmp[20];
int c, i, p=0, n = 5+31;
p += slprintf(buf+p, sizeof(buf)-p, "\nDump Len: %08X (%d)", len, len);
if (len) {
for(i=0; i<len+15 && p+n<=sizeof(buf); i++) {
if (i%16==0) {
p += slprintf(buf+p, sizeof(buf)-p, "\n%08X: ", i+offset);
}
if (i<len) {
c = *addr++;
p += slprintf(buf+p, sizeof(buf)-p, "%02X ", c);
tmp[i%16] = c>=32 ? c : '.';
tmp[(i%16)+1] = '\0';
} else {
p += slprintf(buf+p, sizeof(buf)-p, " ");
}
if (i%16==15) {
p += slprintf(buf+p, sizeof(buf)-p, " %s", tmp);
if (i>=len) {
break;
}
}
}
}
buf[sizeof(buf)-1] = '\0';
return buf;
}
#endif
/* }}} */
/* {{{ php_jpg_get16
Get 16 bits motorola order (always) for jpeg header stuff.
*/
static int php_jpg_get16(void *value)
{
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
}
/* }}} */
/* {{{ php_ifd_get16u
* Convert a 16 bit unsigned value from file's native byte order */
static int php_ifd_get16u(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
} else {
return (((uchar *)value)[1] << 8) | ((uchar *)value)[0];
}
}
/* }}} */
/* {{{ php_ifd_get16s
* Convert a 16 bit signed value from file's native byte order */
static signed short php_ifd_get16s(void *value, int motorola_intel)
{
return (signed short)php_ifd_get16u(value, motorola_intel);
}
/* }}} */
/* {{{ php_ifd_get32s
* Convert a 32 bit signed value from file's native byte order */
static int php_ifd_get32s(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((char *)value)[0] << 24)
| (((uchar *)value)[1] << 16)
| (((uchar *)value)[2] << 8 )
| (((uchar *)value)[3] );
} else {
return (((char *)value)[3] << 24)
| (((uchar *)value)[2] << 16)
| (((uchar *)value)[1] << 8 )
| (((uchar *)value)[0] );
}
}
/* }}} */
/* {{{ php_ifd_get32u
* Write 32 bit unsigned value to data */
static unsigned php_ifd_get32u(void *value, int motorola_intel)
{
return (unsigned)php_ifd_get32s(value, motorola_intel) & 0xffffffff;
}
/* }}} */
/* {{{ php_ifd_set16u
* Write 16 bit unsigned value to data */
static void php_ifd_set16u(char *data, unsigned int value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF00) >> 8;
data[1] = (value & 0x00FF);
} else {
data[1] = (value & 0xFF00) >> 8;
data[0] = (value & 0x00FF);
}
}
/* }}} */
/* {{{ php_ifd_set32u
* Convert a 32 bit unsigned value from file's native byte order */
static void php_ifd_set32u(char *data, size_t value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF000000) >> 24;
data[1] = (value & 0x00FF0000) >> 16;
data[2] = (value & 0x0000FF00) >> 8;
data[3] = (value & 0x000000FF);
} else {
data[3] = (value & 0xFF000000) >> 24;
data[2] = (value & 0x00FF0000) >> 16;
data[1] = (value & 0x0000FF00) >> 8;
data[0] = (value & 0x000000FF);
}
}
/* }}} */
#ifdef EXIF_DEBUG
char * exif_dump_data(int *dump_free, int format, int components, int length, int motorola_intel, char *value_ptr TSRMLS_DC) /* {{{ */
{
char *dump;
int len;
*dump_free = 0;
if (format == TAG_FMT_STRING) {
return value_ptr ? value_ptr : "<no data>";
}
if (format == TAG_FMT_UNDEFINED) {
return "<undefined>\n";
}
if (format == TAG_FMT_IFD) {
return "";
}
if (format == TAG_FMT_SINGLE || format == TAG_FMT_DOUBLE) {
return "<not implemented>";
}
*dump_free = 1;
if (components > 1) {
len = spprintf(&dump, 0, "(%d,%d) {", components, length);
} else {
len = spprintf(&dump, 0, "{");
}
while(components > 0) {
switch(format) {
case TAG_FMT_BYTE:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
case TAG_FMT_SBYTE:
dump = erealloc(dump, len + 4 + 1);
snprintf(dump + len, 4 + 1, "0x%02X", *value_ptr);
len += 4;
value_ptr++;
break;
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get16s(value_ptr, motorola_intel));
len += 6;
value_ptr += 2;
break;
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get32s(value_ptr, motorola_intel));
len += 6;
value_ptr += 4;
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
dump = erealloc(dump, len + 13 + 1);
snprintf(dump + len, 13 + 1, "0x%04X/0x%04X", php_ifd_get32s(value_ptr, motorola_intel), php_ifd_get32s(value_ptr+4, motorola_intel));
len += 13;
value_ptr += 8;
break;
}
if (components > 0) {
dump = erealloc(dump, len + 2 + 1);
snprintf(dump + len, 2 + 1, ", ");
len += 2;
components--;
} else{
break;
}
}
dump = erealloc(dump, len + 1 + 1);
snprintf(dump + len, 1 + 1, "}");
return dump;
}
/* }}} */
#endif
/* {{{ exif_convert_any_format
* Evaluate number, be it int, rational, or float from directory. */
static double exif_convert_any_format(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return (double)php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return (double)php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return (signed short)php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (double)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return *(double *)value;
}
return 0;
}
/* }}} */
/* {{{ exif_convert_any_to_int
* Evaluate number, be it int, rational, or float from directory. */
static size_t exif_convert_any_to_int(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (size_t)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return (size_t)*(double *)value;
}
return 0;
}
/* }}} */
/* {{{ struct image_info_value, image_info_list
*/
#ifndef WORD
#define WORD unsigned short
#endif
#ifndef DWORD
#define DWORD unsigned int
#endif
typedef struct {
int num;
int den;
} signed_rational;
typedef struct {
unsigned int num;
unsigned int den;
} unsigned_rational;
typedef union _image_info_value {
char *s;
unsigned u;
int i;
float f;
double d;
signed_rational sr;
unsigned_rational ur;
union _image_info_value *list;
} image_info_value;
typedef struct {
WORD tag;
WORD format;
DWORD length;
DWORD dummy; /* value ptr of tiff directory entry */
char *name;
image_info_value value;
} image_info_data;
typedef struct {
int count;
image_info_data *list;
} image_info_list;
/* }}} */
/* {{{ exif_get_sectionname
Returns the name of a section
*/
#define SECTION_FILE 0
#define SECTION_COMPUTED 1
#define SECTION_ANY_TAG 2
#define SECTION_IFD0 3
#define SECTION_THUMBNAIL 4
#define SECTION_COMMENT 5
#define SECTION_APP0 6
#define SECTION_EXIF 7
#define SECTION_FPIX 8
#define SECTION_GPS 9
#define SECTION_INTEROP 10
#define SECTION_APP12 11
#define SECTION_WINXP 12
#define SECTION_MAKERNOTE 13
#define SECTION_COUNT 14
#define FOUND_FILE (1<<SECTION_FILE)
#define FOUND_COMPUTED (1<<SECTION_COMPUTED)
#define FOUND_ANY_TAG (1<<SECTION_ANY_TAG)
#define FOUND_IFD0 (1<<SECTION_IFD0)
#define FOUND_THUMBNAIL (1<<SECTION_THUMBNAIL)
#define FOUND_COMMENT (1<<SECTION_COMMENT)
#define FOUND_APP0 (1<<SECTION_APP0)
#define FOUND_EXIF (1<<SECTION_EXIF)
#define FOUND_FPIX (1<<SECTION_FPIX)
#define FOUND_GPS (1<<SECTION_GPS)
#define FOUND_INTEROP (1<<SECTION_INTEROP)
#define FOUND_APP12 (1<<SECTION_APP12)
#define FOUND_WINXP (1<<SECTION_WINXP)
#define FOUND_MAKERNOTE (1<<SECTION_MAKERNOTE)
static char *exif_get_sectionname(int section)
{
switch(section) {
case SECTION_FILE: return "FILE";
case SECTION_COMPUTED: return "COMPUTED";
case SECTION_ANY_TAG: return "ANY_TAG";
case SECTION_IFD0: return "IFD0";
case SECTION_THUMBNAIL: return "THUMBNAIL";
case SECTION_COMMENT: return "COMMENT";
case SECTION_APP0: return "APP0";
case SECTION_EXIF: return "EXIF";
case SECTION_FPIX: return "FPIX";
case SECTION_GPS: return "GPS";
case SECTION_INTEROP: return "INTEROP";
case SECTION_APP12: return "APP12";
case SECTION_WINXP: return "WINXP";
case SECTION_MAKERNOTE: return "MAKERNOTE";
}
return "";
}
static tag_table_type exif_get_tag_table(int section)
{
switch(section) {
case SECTION_FILE: return &tag_table_IFD[0];
case SECTION_COMPUTED: return &tag_table_IFD[0];
case SECTION_ANY_TAG: return &tag_table_IFD[0];
case SECTION_IFD0: return &tag_table_IFD[0];
case SECTION_THUMBNAIL: return &tag_table_IFD[0];
case SECTION_COMMENT: return &tag_table_IFD[0];
case SECTION_APP0: return &tag_table_IFD[0];
case SECTION_EXIF: return &tag_table_IFD[0];
case SECTION_FPIX: return &tag_table_IFD[0];
case SECTION_GPS: return &tag_table_GPS[0];
case SECTION_INTEROP: return &tag_table_IOP[0];
case SECTION_APP12: return &tag_table_IFD[0];
case SECTION_WINXP: return &tag_table_IFD[0];
}
return &tag_table_IFD[0];
}
/* }}} */
/* {{{ exif_get_sectionlist
Return list of sectionnames specified by sectionlist. Return value must be freed
*/
static char *exif_get_sectionlist(int sectionlist TSRMLS_DC)
{
int i, len, ml = 0;
char *sections;
for(i=0; i<SECTION_COUNT; i++) {
ml += strlen(exif_get_sectionname(i))+2;
}
sections = safe_emalloc(ml, 1, 1);
sections[0] = '\0';
len = 0;
for(i=0; i<SECTION_COUNT; i++) {
if (sectionlist&(1<<i)) {
snprintf(sections+len, ml-len, "%s, ", exif_get_sectionname(i));
len = strlen(sections);
}
}
if (len>2)
sections[len-2] = '\0';
return sections;
}
/* }}} */
/* {{{ struct image_info_type
This structure stores Exif header image elements in a simple manner
Used to store camera data as extracted from the various ways that it can be
stored in a nexif header
*/
typedef struct {
int type;
size_t size;
uchar *data;
} file_section;
typedef struct {
int count;
file_section *list;
} file_section_list;
typedef struct {
image_filetype filetype;
size_t width, height;
size_t size;
size_t offset;
char *data;
} thumbnail_data;
typedef struct {
char *value;
size_t size;
int tag;
} xp_field_type;
typedef struct {
int count;
xp_field_type *list;
} xp_field_list;
/* This structure is used to store a section of a Jpeg file. */
typedef struct {
php_stream *infile;
char *FileName;
time_t FileDateTime;
size_t FileSize;
image_filetype FileType;
int Height, Width;
int IsColor;
char *make;
char *model;
float ApertureFNumber;
float ExposureTime;
double FocalplaneUnits;
float CCDWidth;
double FocalplaneXRes;
size_t ExifImageWidth;
float FocalLength;
float Distance;
int motorola_intel; /* 1 Motorola; 0 Intel */
char *UserComment;
int UserCommentLength;
char *UserCommentEncoding;
char *encode_unicode;
char *decode_unicode_be;
char *decode_unicode_le;
char *encode_jis;
char *decode_jis_be;
char *decode_jis_le;
char *Copyright;/* EXIF standard defines Copyright as "<Photographer> [ '\0' <Editor> ] ['\0']" */
char *CopyrightPhotographer;
char *CopyrightEditor;
xp_field_list xp_fields;
thumbnail_data Thumbnail;
/* other */
int sections_found; /* FOUND_<marker> */
image_info_list info_list[SECTION_COUNT];
/* for parsing */
int read_thumbnail;
int read_all;
int ifd_nesting_level;
/* internal */
file_section_list file;
} image_info_type;
/* }}} */
/* {{{ exif_error_docref */
static void exif_error_docref(const char *docref EXIFERR_DC, const image_info_type *ImageInfo, int type, const char *format, ...)
{
va_list args;
va_start(args, format);
#ifdef EXIF_DEBUG
{
char *buf;
spprintf(&buf, 0, "%s(%d): %s", _file, _line, format);
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, buf, args TSRMLS_CC);
efree(buf);
}
#else
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, format, args TSRMLS_CC);
#endif
va_end(args);
}
/* }}} */
/* {{{ jpeg_sof_info
*/
typedef struct {
int bits_per_sample;
size_t width;
size_t height;
int num_components;
} jpeg_sof_info;
/* }}} */
/* {{{ exif_file_sections_add
Add a file_section to image_info
returns the used block or -1. if size>0 and data == NULL buffer of size is allocated
*/
static int exif_file_sections_add(image_info_type *ImageInfo, int type, size_t size, uchar *data)
{
file_section *tmp;
int count = ImageInfo->file.count;
tmp = safe_erealloc(ImageInfo->file.list, (count+1), sizeof(file_section), 0);
ImageInfo->file.list = tmp;
ImageInfo->file.list[count].type = 0xFFFF;
ImageInfo->file.list[count].data = NULL;
ImageInfo->file.list[count].size = 0;
ImageInfo->file.count = count+1;
if (!size) {
data = NULL;
} else if (data == NULL) {
data = safe_emalloc(size, 1, 0);
}
ImageInfo->file.list[count].type = type;
ImageInfo->file.list[count].data = data;
ImageInfo->file.list[count].size = size;
return count;
}
/* }}} */
/* {{{ exif_file_sections_realloc
Reallocate a file section returns 0 on success and -1 on failure
*/
static int exif_file_sections_realloc(image_info_type *ImageInfo, int section_index, size_t size TSRMLS_DC)
{
void *tmp;
/* This is not a malloc/realloc check. It is a plausibility check for the
* function parameters (requirements engineering).
*/
if (section_index >= ImageInfo->file.count) {
EXIF_ERRLOG_FSREALLOC(ImageInfo)
return -1;
}
tmp = safe_erealloc(ImageInfo->file.list[section_index].data, 1, size, 0);
ImageInfo->file.list[section_index].data = tmp;
ImageInfo->file.list[section_index].size = size;
return 0;
}
/* }}} */
/* {{{ exif_file_section_free
Discard all file_sections in ImageInfo
*/
static int exif_file_sections_free(image_info_type *ImageInfo)
{
int i;
if (ImageInfo->file.count) {
for (i=0; i<ImageInfo->file.count; i++) {
EFREE_IF(ImageInfo->file.list[i].data);
}
}
EFREE_IF(ImageInfo->file.list);
ImageInfo->file.count = 0;
return TRUE;
}
/* }}} */
/* {{{ exif_iif_add_value
Add a value to image_info
*/
static void exif_iif_add_value(image_info_type *image_info, int section_index, char *name, int tag, int format, int length, void* value, int motorola_intel TSRMLS_DC)
{
size_t idex;
void *vptr;
image_info_value *info_value;
image_info_data *info_data;
image_info_data *list;
if (length < 0) {
return;
}
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
memset(info_data, 0, sizeof(image_info_data));
info_data->tag = tag;
info_data->format = format;
info_data->length = length;
info_data->name = estrdup(name);
info_value = &info_data->value;
switch (format) {
case TAG_FMT_STRING:
if (value) {
length = php_strnlen(value, length);
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
default:
/* Standard says more types possible but skip them...
* but allow users to handle data if they know how to
* So not return but use type UNDEFINED
* return;
*/
info_data->tag = TAG_FMT_UNDEFINED;/* otherwise not freed from memory */
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings bytes do not need to allocate buffer for NULL if length==0 */
if (!length)
break;
case TAG_FMT_UNDEFINED:
if (value) {
if (tag == TAG_MAKER_NOTE) {
length = MIN(length, strlen(value));
}
/* do not recompute length here */
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
if (length==0) {
break;
} else
if (length>1) {
info_value->list = safe_emalloc(length, sizeof(image_info_value), 0);
} else {
info_value = &info_data->value;
}
for (idex=0,vptr=value; idex<(size_t)length; idex++,vptr=(char *) vptr + php_tiff_bytes_per_format[format]) {
if (length>1) {
info_value = &info_data->value.list[idex];
}
switch (format) {
case TAG_FMT_USHORT:
info_value->u = php_ifd_get16u(vptr, motorola_intel);
break;
case TAG_FMT_ULONG:
info_value->u = php_ifd_get32u(vptr, motorola_intel);
break;
case TAG_FMT_URATIONAL:
info_value->ur.num = php_ifd_get32u(vptr, motorola_intel);
info_value->ur.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SSHORT:
info_value->i = php_ifd_get16s(vptr, motorola_intel);
break;
case TAG_FMT_SLONG:
info_value->i = php_ifd_get32s(vptr, motorola_intel);
break;
case TAG_FMT_SRATIONAL:
info_value->sr.num = php_ifd_get32u(vptr, motorola_intel);
info_value->sr.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type single");
#endif
info_value->f = *(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type double");
#endif
info_value->d = *(double *)value;
break;
}
}
}
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_tag
Add a tag from IFD to image_info
*/
static void exif_iif_add_tag(image_info_type *image_info, int section_index, char *name, int tag, int format, size_t length, void* value TSRMLS_DC)
{
exif_iif_add_value(image_info, section_index, name, tag, format, (int)length, value, image_info->motorola_intel TSRMLS_CC);
}
/* }}} */
/* {{{ exif_iif_add_int
Add an int value to image_info
*/
static void exif_iif_add_int(image_info_type *image_info, int section_index, char *name, int value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_SLONG;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.i = value;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_str
Add a string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_str(image_info_type *image_info, int section_index, char *name, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_STRING;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.s = estrdup(value);
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_add_fmt
Add a format string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_fmt(image_info_type *image_info, int section_index, char *name TSRMLS_DC, char *value, ...)
{
char *tmp;
va_list arglist;
va_start(arglist, value);
if (value) {
vspprintf(&tmp, 0, value, arglist);
exif_iif_add_str(image_info, section_index, name, tmp TSRMLS_CC);
efree(tmp);
}
va_end(arglist);
}
/* }}} */
/* {{{ exif_iif_add_str
Add a string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_buffer(image_info_type *image_info, int section_index, char *name, int length, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_UNDEFINED;
info_data->length = length;
info_data->name = estrdup(name);
info_data->value.s = safe_emalloc(length, 1, 1);
memcpy(info_data->value.s, value, length);
info_data->value.s[length] = 0;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_free
Free memory allocated for image_info
*/
static void exif_iif_free(image_info_type *image_info, int section_index) {
int i;
void *f; /* faster */
if (image_info->info_list[section_index].count) {
for (i=0; i < image_info->info_list[section_index].count; i++) {
if ((f=image_info->info_list[section_index].list[i].name) != NULL) {
efree(f);
}
switch(image_info->info_list[section_index].list[i].format) {
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings bytes do not need to allocate buffer for NULL if length==0 */
if (image_info->info_list[section_index].list[i].length<1)
break;
default:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
if ((f=image_info->info_list[section_index].list[i].value.s) != NULL) {
efree(f);
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
/* nothing to do here */
if (image_info->info_list[section_index].list[i].length > 1) {
if ((f=image_info->info_list[section_index].list[i].value.list) != NULL) {
efree(f);
}
}
break;
}
}
}
EFREE_IF(image_info->info_list[section_index].list);
}
/* }}} */
/* {{{ add_assoc_image_info
* Add image_info to associative array value. */
static void add_assoc_image_info(zval *value, int sub_array, image_info_type *image_info, int section_index TSRMLS_DC)
{
char buffer[64], *val, *name, uname[64];
int i, ap, l, b, idx=0, unknown=0;
#ifdef EXIF_DEBUG
int info_tag;
#endif
image_info_value *info_value;
image_info_data *info_data;
zval *tmpi, *array = NULL;
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding %d infos from section %s", image_info->info_list[section_index].count, exif_get_sectionname(section_index));*/
#endif
if (image_info->info_list[section_index].count) {
if (sub_array) {
MAKE_STD_ZVAL(tmpi);
array_init(tmpi);
} else {
tmpi = value;
}
for(i=0; i<image_info->info_list[section_index].count; i++) {
info_data = &image_info->info_list[section_index].list[i];
#ifdef EXIF_DEBUG
info_tag = info_data->tag; /* conversion */
#endif
info_value = &info_data->value;
if (!(name = info_data->name)) {
snprintf(uname, sizeof(uname), "%d", unknown++);
name = uname;
}
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding infos: tag(0x%04X,%12s,L=0x%04X): %s", info_tag, exif_get_tagname(info_tag, buffer, -12, exif_get_tag_table(section_index) TSRMLS_CC), info_data->length, info_data->format==TAG_FMT_STRING?(info_value&&info_value->s?info_value->s:"<no data>"):exif_get_tagformat(info_data->format));*/
#endif
if (info_data->length==0) {
add_assoc_null(tmpi, name);
} else {
switch (info_data->format) {
default:
/* Standard says more types possible but skip them...
* but allow users to handle data if they know how to
* So not return but use type UNDEFINED
* return;
*/
case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:
case TAG_FMT_UNDEFINED:
if (!info_value->s) {
add_assoc_stringl(tmpi, name, "", 0, 1);
} else {
add_assoc_stringl(tmpi, name, info_value->s, info_data->length, 1);
}
break;
case TAG_FMT_STRING:
if (!(val = info_value->s)) {
val = "";
}
if (section_index==SECTION_COMMENT) {
add_index_string(tmpi, idx++, val, 1);
} else {
add_assoc_string(tmpi, name, val, 1);
}
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
/*case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:*/
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
/* now the rest, first see if it becomes an array */
if ((l = info_data->length) > 1) {
array = NULL;
MAKE_STD_ZVAL(array);
array_init(array);
}
for(ap=0; ap<l; ap++) {
if (l>1) {
info_value = &info_data->value.list[ap];
}
switch (info_data->format) {
case TAG_FMT_BYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, b, (int)(info_value->s[b]));
}
break;
}
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
if (l==1) {
add_assoc_long(tmpi, name, (int)info_value->u);
} else {
add_index_long(array, ap, (int)info_value->u);
}
break;
case TAG_FMT_URATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->ur.num, info_value->ur.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SBYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, ap, (int)info_value->s[b]);
}
break;
}
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
if (l==1) {
add_assoc_long(tmpi, name, info_value->i);
} else {
add_index_long(array, ap, info_value->i);
}
break;
case TAG_FMT_SRATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->sr.num, info_value->sr.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SINGLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->f);
} else {
add_index_double(array, ap, info_value->f);
}
break;
case TAG_FMT_DOUBLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->d);
} else {
add_index_double(array, ap, info_value->d);
}
break;
}
info_value = &info_data->value.list[ap];
}
if (l>1) {
add_assoc_zval(tmpi, name, array);
}
break;
}
}
}
if (sub_array) {
add_assoc_zval(value, exif_get_sectionname(section_index), tmpi);
}
}
}
/* }}} */
/* {{{ Markers
JPEG markers consist of one or more 0xFF bytes, followed by a marker
code byte (which is not an FF). Here are the marker codes of interest
in this program. (See jdmarker.c for a more complete list.)
*/
#define M_TEM 0x01 /* temp for arithmetic coding */
#define M_RES 0x02 /* reserved */
#define M_SOF0 0xC0 /* Start Of Frame N */
#define M_SOF1 0xC1 /* N indicates which compression process */
#define M_SOF2 0xC2 /* Only SOF0-SOF2 are now in common use */
#define M_SOF3 0xC3
#define M_DHT 0xC4
#define M_SOF5 0xC5 /* NB: codes C4 and CC are NOT SOF markers */
#define M_SOF6 0xC6
#define M_SOF7 0xC7
#define M_JPEG 0x08 /* reserved for extensions */
#define M_SOF9 0xC9
#define M_SOF10 0xCA
#define M_SOF11 0xCB
#define M_DAC 0xCC /* arithmetic table */
#define M_SOF13 0xCD
#define M_SOF14 0xCE
#define M_SOF15 0xCF
#define M_RST0 0xD0 /* restart segment */
#define M_RST1 0xD1
#define M_RST2 0xD2
#define M_RST3 0xD3
#define M_RST4 0xD4
#define M_RST5 0xD5
#define M_RST6 0xD6
#define M_RST7 0xD7
#define M_SOI 0xD8 /* Start Of Image (beginning of datastream) */
#define M_EOI 0xD9 /* End Of Image (end of datastream) */
#define M_SOS 0xDA /* Start Of Scan (begins compressed data) */
#define M_DQT 0xDB
#define M_DNL 0xDC
#define M_DRI 0xDD
#define M_DHP 0xDE
#define M_EXP 0xDF
#define M_APP0 0xE0 /* JPEG: 'JFIFF' AND (additional 'JFXX') */
#define M_EXIF 0xE1 /* Exif Attribute Information */
#define M_APP2 0xE2 /* Flash Pix Extension Data? */
#define M_APP3 0xE3
#define M_APP4 0xE4
#define M_APP5 0xE5
#define M_APP6 0xE6
#define M_APP7 0xE7
#define M_APP8 0xE8
#define M_APP9 0xE9
#define M_APP10 0xEA
#define M_APP11 0xEB
#define M_APP12 0xEC
#define M_APP13 0xED /* IPTC International Press Telecommunications Council */
#define M_APP14 0xEE /* Software, Copyright? */
#define M_APP15 0xEF
#define M_JPG0 0xF0
#define M_JPG1 0xF1
#define M_JPG2 0xF2
#define M_JPG3 0xF3
#define M_JPG4 0xF4
#define M_JPG5 0xF5
#define M_JPG6 0xF6
#define M_JPG7 0xF7
#define M_JPG8 0xF8
#define M_JPG9 0xF9
#define M_JPG10 0xFA
#define M_JPG11 0xFB
#define M_JPG12 0xFC
#define M_JPG13 0xFD
#define M_COM 0xFE /* COMment */
#define M_PSEUDO 0x123 /* Extra value. */
/* }}} */
/* {{{ jpeg2000 markers
*/
/* Markers x30 - x3F do not have a segment */
/* Markers x00, x01, xFE, xC0 - xDF ISO/IEC 10918-1 -> M_<xx> */
/* Markers xF0 - xF7 ISO/IEC 10918-3 */
/* Markers xF7 - xF8 ISO/IEC 14495-1 */
/* XY=Main/Tile-header:(R:required, N:not_allowed, O:optional, L:last_marker) */
#define JC_SOC 0x4F /* NN, Start of codestream */
#define JC_SIZ 0x51 /* RN, Image and tile size */
#define JC_COD 0x52 /* RO, Codeing style defaulte */
#define JC_COC 0x53 /* OO, Coding style component */
#define JC_TLM 0x55 /* ON, Tile part length main header */
#define JC_PLM 0x57 /* ON, Packet length main header */
#define JC_PLT 0x58 /* NO, Packet length tile part header */
#define JC_QCD 0x5C /* RO, Quantization default */
#define JC_QCC 0x5D /* OO, Quantization component */
#define JC_RGN 0x5E /* OO, Region of interest */
#define JC_POD 0x5F /* OO, Progression order default */
#define JC_PPM 0x60 /* ON, Packed packet headers main header */
#define JC_PPT 0x61 /* NO, Packet packet headers tile part header */
#define JC_CME 0x64 /* OO, Comment: "LL E <text>" E=0:binary, E=1:ascii */
#define JC_SOT 0x90 /* NR, Start of tile */
#define JC_SOP 0x91 /* NO, Start of packeter default */
#define JC_EPH 0x92 /* NO, End of packet header */
#define JC_SOD 0x93 /* NL, Start of data */
#define JC_EOC 0xD9 /* NN, End of codestream */
/* }}} */
/* {{{ exif_process_COM
Process a COM marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
static void exif_process_COM (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length-2, value+2 TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_CME
Process a CME marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
#ifdef EXIF_JPEG2000
static void exif_process_CME (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
if (length>3) {
switch(value[2]) {
case 0:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, length, value TSRMLS_CC);
break;
case 1:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length, value);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Undefined JPEG2000 comment encoding");
break;
}
} else {
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, 0, NULL);
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "JPEG2000 comment section too small");
}
}
#endif
/* }}} */
/* {{{ exif_process_SOFn
* Process a SOFn marker. This is useful for the image dimensions */
static void exif_process_SOFn (uchar *Data, int marker, jpeg_sof_info *result)
{
/* 0xFF SOSn SectLen(2) Bits(1) Height(2) Width(2) Channels(1) 3*Channels (1) */
result->bits_per_sample = Data[2];
result->height = php_jpg_get16(Data+3);
result->width = php_jpg_get16(Data+5);
result->num_components = Data[7];
/* switch (marker) {
case M_SOF0: process = "Baseline"; break;
case M_SOF1: process = "Extended sequential"; break;
case M_SOF2: process = "Progressive"; break;
case M_SOF3: process = "Lossless"; break;
case M_SOF5: process = "Differential sequential"; break;
case M_SOF6: process = "Differential progressive"; break;
case M_SOF7: process = "Differential lossless"; break;
case M_SOF9: process = "Extended sequential, arithmetic coding"; break;
case M_SOF10: process = "Progressive, arithmetic coding"; break;
case M_SOF11: process = "Lossless, arithmetic coding"; break;
case M_SOF13: process = "Differential sequential, arithmetic coding"; break;
case M_SOF14: process = "Differential progressive, arithmetic coding"; break;
case M_SOF15: process = "Differential lossless, arithmetic coding"; break;
default: process = "Unknown"; break;
}*/
}
/* }}} */
/* forward declarations */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC);
static int exif_process_IFD_TAG( image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC);
/* {{{ exif_get_markername
Get name of marker */
#ifdef EXIF_DEBUG
static char * exif_get_markername(int marker)
{
switch(marker) {
case 0xC0: return "SOF0";
case 0xC1: return "SOF1";
case 0xC2: return "SOF2";
case 0xC3: return "SOF3";
case 0xC4: return "DHT";
case 0xC5: return "SOF5";
case 0xC6: return "SOF6";
case 0xC7: return "SOF7";
case 0xC9: return "SOF9";
case 0xCA: return "SOF10";
case 0xCB: return "SOF11";
case 0xCD: return "SOF13";
case 0xCE: return "SOF14";
case 0xCF: return "SOF15";
case 0xD8: return "SOI";
case 0xD9: return "EOI";
case 0xDA: return "SOS";
case 0xDB: return "DQT";
case 0xDC: return "DNL";
case 0xDD: return "DRI";
case 0xDE: return "DHP";
case 0xDF: return "EXP";
case 0xE0: return "APP0";
case 0xE1: return "EXIF";
case 0xE2: return "FPIX";
case 0xE3: return "APP3";
case 0xE4: return "APP4";
case 0xE5: return "APP5";
case 0xE6: return "APP6";
case 0xE7: return "APP7";
case 0xE8: return "APP8";
case 0xE9: return "APP9";
case 0xEA: return "APP10";
case 0xEB: return "APP11";
case 0xEC: return "APP12";
case 0xED: return "APP13";
case 0xEE: return "APP14";
case 0xEF: return "APP15";
case 0xF0: return "JPG0";
case 0xFD: return "JPG13";
case 0xFE: return "COM";
case 0x01: return "TEM";
}
return "Unknown";
}
#endif
/* }}} */
/* {{{ proto string exif_tagname(index)
Get headername for index or false if not defined */
PHP_FUNCTION(exif_tagname)
{
long tag;
char *szTemp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &tag) == FAILURE) {
return;
}
szTemp = exif_get_tagname(tag, NULL, 0, tag_table_IFD TSRMLS_CC);
if (tag < 0 || !szTemp || !szTemp[0]) {
RETURN_FALSE;
}
RETURN_STRING(szTemp, 1)
}
/* }}} */
/* {{{ exif_ifd_make_value
* Create a value for an ifd from an info_data pointer */
static void* exif_ifd_make_value(image_info_data *info_data, int motorola_intel TSRMLS_DC) {
size_t byte_count;
char *value_ptr, *data_ptr;
size_t i;
image_info_value *info_value;
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
value_ptr = safe_emalloc(max(byte_count, 4), 1, 0);
memset(value_ptr, 0, 4);
if (!info_data->length) {
return value_ptr;
}
if (info_data->format == TAG_FMT_UNDEFINED || info_data->format == TAG_FMT_STRING
|| (byte_count>1 && (info_data->format == TAG_FMT_BYTE || info_data->format == TAG_FMT_SBYTE))
) {
memmove(value_ptr, info_data->value.s, byte_count);
return value_ptr;
} else if (info_data->format == TAG_FMT_BYTE) {
*value_ptr = info_data->value.u;
return value_ptr;
} else if (info_data->format == TAG_FMT_SBYTE) {
*value_ptr = info_data->value.i;
return value_ptr;
} else {
data_ptr = value_ptr;
for(i=0; i<info_data->length; i++) {
if (info_data->length==1) {
info_value = &info_data->value;
} else {
info_value = &info_data->value.list[i];
}
switch(info_data->format) {
case TAG_FMT_USHORT:
php_ifd_set16u(data_ptr, info_value->u, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_ULONG:
php_ifd_set32u(data_ptr, info_value->u, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_SSHORT:
php_ifd_set16u(data_ptr, info_value->i, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_SLONG:
php_ifd_set32u(data_ptr, info_value->i, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_URATIONAL:
php_ifd_set32u(data_ptr, info_value->sr.num, motorola_intel);
php_ifd_set32u(data_ptr+4, info_value->sr.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SRATIONAL:
php_ifd_set32u(data_ptr, info_value->ur.num, motorola_intel);
php_ifd_set32u(data_ptr+4, info_value->ur.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SINGLE:
memmove(data_ptr, &info_value->f, 4);
data_ptr += 4;
break;
case TAG_FMT_DOUBLE:
memmove(data_ptr, &info_value->d, 8);
data_ptr += 8;
break;
}
}
}
return value_ptr;
}
/* }}} */
/* {{{ exif_thumbnail_build
* Check and build thumbnail */
static void exif_thumbnail_build(image_info_type *ImageInfo TSRMLS_DC) {
size_t new_size, new_move, new_value;
char *new_data;
void *value_ptr;
int i, byte_count;
image_info_list *info_list;
image_info_data *info_data;
#ifdef EXIF_DEBUG
char tagname[64];
#endif
if (!ImageInfo->read_thumbnail || !ImageInfo->Thumbnail.offset || !ImageInfo->Thumbnail.size) {
return; /* ignore this call */
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: filetype = %d", ImageInfo->Thumbnail.filetype);
#endif
switch(ImageInfo->Thumbnail.filetype) {
default:
case IMAGE_FILETYPE_JPEG:
/* done */
break;
case IMAGE_FILETYPE_TIFF_II:
case IMAGE_FILETYPE_TIFF_MM:
info_list = &ImageInfo->info_list[SECTION_THUMBNAIL];
new_size = 8 + 2 + info_list->count * 12 + 4;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size of signature + directory(%d): 0x%02X", info_list->count, new_size);
#endif
new_value= new_size; /* offset for ifd values outside ifd directory */
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
if (byte_count > 4) {
new_size += byte_count;
}
}
new_move = new_size;
new_data = safe_erealloc(ImageInfo->Thumbnail.data, 1, ImageInfo->Thumbnail.size, new_size);
ImageInfo->Thumbnail.data = new_data;
memmove(ImageInfo->Thumbnail.data + new_move, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
ImageInfo->Thumbnail.size += new_size;
/* fill in data */
if (ImageInfo->motorola_intel) {
memmove(new_data, "MM\x00\x2a\x00\x00\x00\x08", 8);
} else {
memmove(new_data, "II\x2a\x00\x08\x00\x00\x00", 8);
}
new_data += 8;
php_ifd_set16u(new_data, info_list->count, ImageInfo->motorola_intel);
new_data += 2;
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process tag(x%04X=%s): %s%s (%d bytes)", info_data->tag, exif_get_tagname(info_data->tag, tagname, -12, tag_table_IFD TSRMLS_CC), (info_data->length>1)&&info_data->format!=TAG_FMT_UNDEFINED&&info_data->format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(info_data->format), byte_count);
#endif
if (info_data->tag==TAG_STRIP_OFFSETS || info_data->tag==TAG_JPEG_INTERCHANGE_FORMAT) {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, TAG_FMT_ULONG, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, 1, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 8, new_move, ImageInfo->motorola_intel);
} else {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, info_data->format, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, info_data->length, ImageInfo->motorola_intel);
value_ptr = exif_ifd_make_value(info_data, ImageInfo->motorola_intel TSRMLS_CC);
if (byte_count <= 4) {
memmove(new_data+8, value_ptr, 4);
} else {
php_ifd_set32u(new_data+8, new_value, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: writing with value offset: 0x%04X + 0x%02X", new_value, byte_count);
#endif
memmove(ImageInfo->Thumbnail.data+new_value, value_ptr, byte_count);
new_value += byte_count;
}
efree(value_ptr);
}
new_data += 12;
}
memset(new_data, 0, 4); /* next ifd pointer */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: created");
#endif
break;
}
}
/* }}} */
/* {{{ exif_thumbnail_extract
* Grab the thumbnail, corrected */
static void exif_thumbnail_extract(image_info_type *ImageInfo, char *offset, size_t length TSRMLS_DC) {
if (ImageInfo->Thumbnail.data) {
exif_error_docref("exif_read_data#error_mult_thumb" EXIFERR_CC, ImageInfo, E_WARNING, "Multiple possible thumbnails");
return; /* Should not happen */
}
if (!ImageInfo->read_thumbnail) {
return; /* ignore this call */
}
/* according to exif2.1, the thumbnail is not supposed to be greater than 64K */
if (ImageInfo->Thumbnail.size >= 65536
|| ImageInfo->Thumbnail.size <= 0
|| ImageInfo->Thumbnail.offset <= 0
) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Illegal thumbnail size/offset");
return;
}
/* Check to make sure we are not going to go past the ExifLength */
if ((ImageInfo->Thumbnail.offset + ImageInfo->Thumbnail.size) > length) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
return;
}
ImageInfo->Thumbnail.data = estrndup(offset + ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_undefined
* Copy a string/buffer in Exif header to a character string and return length of allocated buffer if any. */
static int exif_process_undefined(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
* estrndup does not return length
*/
if (byte_count) {
(*result) = estrndup(value, byte_count); /* NULL @ byte_count!!! */
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string_raw
* Copy a string in Exif header to a character string returns length of allocated buffer if any. */
static int exif_process_string_raw(char **result, char *value, size_t byte_count) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
*/
if (byte_count) {
(*result) = safe_emalloc(byte_count, 1, 1);
memcpy(*result, value, byte_count);
(*result)[byte_count] = '\0';
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string
* Copy a string in Exif header to a character string and return length of allocated buffer if any.
* In contrast to exif_process_string this function does always return a string buffer */
static int exif_process_string(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we cannot use strlen to
* determin length of string and we cannot use strlcpy with len=byte_count+1
* because then we might get into an EXCEPTION if we exceed an allocated
* memory page...so we use php_strnlen in conjunction with memcpy and add the NUL
* char.
* estrdup would sometimes allocate more memory and does not return length
*/
if ((byte_count=php_strnlen(value, byte_count)) > 0) {
return exif_process_undefined(result, value, byte_count TSRMLS_CC);
}
(*result) = estrndup("", 1); /* force empty string */
return byte_count+1;
}
/* }}} */
/* {{{ exif_process_user_comment
* Process UserComment in IFD. */
static int exif_process_user_comment(image_info_type *ImageInfo, char **pszInfoPtr, char **pszEncoding, char *szValuePtr, int ByteCount TSRMLS_DC)
{
int a;
char *decode;
size_t len;;
*pszEncoding = NULL;
/* Copy the comment */
if (ByteCount>=8) {
const zend_encoding *from, *to;
if (!memcmp(szValuePtr, "UNICODE\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* First try to detect BOM: ZERO WIDTH NOBREAK SPACE (FEFF 16)
* since we have no encoding support for the BOM yet we skip that.
*/
if (!memcmp(szValuePtr, "\xFE\xFF", 2)) {
decode = "UCS-2BE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (!memcmp(szValuePtr, "\xFF\xFE", 2)) {
decode = "UCS-2LE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (ImageInfo->motorola_intel) {
decode = ImageInfo->decode_unicode_be;
} else {
decode = ImageInfo->decode_unicode_le;
}
to = zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC);
from = zend_multibyte_fetch_encoding(decode TSRMLS_CC);
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "ASCII\0\0\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
} else if (!memcmp(szValuePtr, "JIS\0\0\0\0\0", 8)) {
/* JIS should be tanslated to MB or we leave it to the user - leave it to the user */
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
to = zend_multibyte_fetch_encoding(ImageInfo->encode_jis TSRMLS_CC);
from = zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_jis_be : ImageInfo->decode_jis_le TSRMLS_CC);
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "\0\0\0\0\0\0\0\0", 8)) {
/* 8 NULL means undefined and should be ASCII... */
*pszEncoding = estrdup("UNDEFINED");
szValuePtr = szValuePtr+8;
ByteCount -= 8;
}
}
/* Olympus has this padded with trailing spaces. Remove these first. */
if (ByteCount>0) {
for (a=ByteCount-1;a && szValuePtr[a]==' ';a--) {
(szValuePtr)[a] = '\0';
}
}
/* normal text without encoding */
exif_process_string(pszInfoPtr, szValuePtr, ByteCount TSRMLS_CC);
return strlen(*pszInfoPtr);
}
/* }}} */
/* {{{ exif_process_unicode
* Process unicode field in IFD. */
static int exif_process_unicode(image_info_type *ImageInfo, xp_field_type *xp_field, int tag, char *szValuePtr, int ByteCount TSRMLS_DC)
{
xp_field->tag = tag;
xp_field->value = NULL;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (zend_multibyte_encoding_converter(
(unsigned char**)&xp_field->value,
&xp_field->size,
(unsigned char*)szValuePtr,
ByteCount,
zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC),
zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_unicode_be : ImageInfo->decode_unicode_le TSRMLS_CC)
TSRMLS_CC) == (size_t)-1) {
xp_field->size = exif_process_string_raw(&xp_field->value, szValuePtr, ByteCount);
}
return xp_field->size;
}
/* }}} */
/* {{{ exif_process_IFD_in_MAKERNOTE
* Process nested IFDs directories in Maker Note. */
static int exif_process_IFD_in_MAKERNOTE(image_info_type *ImageInfo, char * value_ptr, int value_len, char *offset_base, size_t IFDlength, size_t displacement TSRMLS_DC)
{
int de, i=0, section_index = SECTION_MAKERNOTE;
int NumDirEntries, old_motorola_intel, offset_diff;
const maker_note_type *maker_note;
char *dir_start;
for (i=0; i<=sizeof(maker_note_array)/sizeof(maker_note_type); i++) {
if (i==sizeof(maker_note_array)/sizeof(maker_note_type)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "No maker note data found. Detected maker: %s (length = %d)", ImageInfo->make, strlen(ImageInfo->make));
#endif
/* unknown manufacturer, not an error, use it as a string */
return TRUE;
}
maker_note = maker_note_array+i;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "check (%s,%s)", maker_note->make?maker_note->make:"", maker_note->model?maker_note->model:"");*/
if (maker_note->make && (!ImageInfo->make || strcmp(maker_note->make, ImageInfo->make)))
continue;
if (maker_note->model && (!ImageInfo->model || strcmp(maker_note->model, ImageInfo->model)))
continue;
if (maker_note->id_string && strncmp(maker_note->id_string, value_ptr, maker_note->id_string_len))
continue;
break;
}
if (maker_note->offset >= value_len) {
/* Do not go past the value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X offset 0x%04X", value_len, maker_note->offset);
return FALSE;
}
dir_start = value_ptr + maker_note->offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s @x%04X + 0x%04X=%d: %s", exif_get_sectionname(section_index), (int)dir_start-(int)offset_base+maker_note->offset+displacement, value_len, value_len, exif_char_dump(value_ptr, value_len, (int)dir_start-(int)offset_base+maker_note->offset+displacement));
#endif
ImageInfo->sections_found |= FOUND_MAKERNOTE;
old_motorola_intel = ImageInfo->motorola_intel;
switch (maker_note->byte_order) {
case MN_ORDER_INTEL:
ImageInfo->motorola_intel = 0;
break;
case MN_ORDER_MOTOROLA:
ImageInfo->motorola_intel = 1;
break;
default:
case MN_ORDER_NORMAL:
break;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
switch (maker_note->offset_mode) {
case MN_OFFSET_MAKER:
offset_base = value_ptr;
break;
case MN_OFFSET_GUESS:
if (maker_note->offset + 10 + 4 >= value_len) {
/* Can not read dir_start+10 since it's beyond value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X", value_len);
return FALSE;
}
offset_diff = 2 + NumDirEntries*12 + 4 - php_ifd_get32u(dir_start+10, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Using automatic offset correction: 0x%04X", ((int)dir_start-(int)offset_base+maker_note->offset+displacement) + offset_diff);
#endif
if (offset_diff < 0 || offset_diff >= value_len ) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data bad offset: 0x%04X length 0x%04X", offset_diff, value_len);
return FALSE;
}
offset_base = value_ptr + offset_diff;
break;
default:
case MN_OFFSET_NORMAL:
break;
}
if ((2+NumDirEntries*12) > value_len) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: 2 + 0x%04X*12 = 0x%04X > 0x%04X", NumDirEntries, 2+NumDirEntries*12, value_len);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 0, maker_note->tag_table TSRMLS_CC)) {
return FALSE;
}
}
ImageInfo->motorola_intel = old_motorola_intel;
/* NextDirOffset (must be NULL) = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);*/
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(SECTION_MAKERNOTE));
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_TAG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_TAG(image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC)
{
size_t length;
int tag, format, components;
char *value_ptr, tagname[64], cbuf[32], *outside=NULL;
size_t byte_count, offset_val, fpos, fgot;
int64_t byte_count_signed;
xp_field_type *tmp_xp;
#ifdef EXIF_DEBUG
char *dump_data;
int dump_free;
#endif /* EXIF_DEBUG */
/* Protect against corrupt headers */
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "corrupt EXIF header: maximum directory nesting level reached");
return FALSE;
}
ImageInfo->ifd_nesting_level++;
tag = php_ifd_get16u(dir_entry, ImageInfo->motorola_intel);
format = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
components = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);
if (!format || format > NUM_FORMATS) {
/* (-1) catches illegal zero case as unsigned underflows to positive large. */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal format code 0x%04X, suppose BYTE", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), format);
format = TAG_FMT_BYTE;
/*return TRUE;*/
}
if (components < 0) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal components(%ld)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), components);
return FALSE;
}
byte_count_signed = (int64_t)components * php_tiff_bytes_per_format[format];
if (byte_count_signed < 0 || (byte_count_signed > INT32_MAX)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal byte_count", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC));
return FALSE;
}
byte_count = (size_t)byte_count_signed;
if (byte_count > 4) {
offset_val = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* If its bigger than 4 bytes, the dir entry contains an offset. */
value_ptr = offset_base+offset_val;
/*
dir_entry is ImageInfo->file.list[sn].data+2+i*12
offset_base is ImageInfo->file.list[sn].data-dir_offset
dir_entry - offset_base is dir_offset+2+i*12
*/
if (byte_count > IFDlength || offset_val > IFDlength-byte_count || value_ptr < dir_entry || offset_val < (size_t)(dir_entry-offset_base)) {
/* It is important to check for IMAGE_FILETYPE_TIFF
* JPEG does not use absolute pointers instead its pointers are
* relative to the start of the TIFF header in APP1 section. */
if (byte_count > ImageInfo->FileSize || offset_val>ImageInfo->FileSize-byte_count || (ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_II && ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_MM && ImageInfo->FileType!=IMAGE_FILETYPE_JPEG)) {
if (value_ptr < dir_entry) {
/* we can read this if offset_val > 0 */
/* some files have their values in other parts of the file */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X < x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, dir_entry);
} else {
/* this is for sure not allowed */
/* exception are IFD pointers */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X + x%04X = x%04X > x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, byte_count, offset_val+byte_count, IFDlength);
}
return FALSE;
}
if (byte_count>sizeof(cbuf)) {
/* mark as outside range and get buffer */
value_ptr = safe_emalloc(byte_count, 1, 0);
outside = value_ptr;
} else {
/* In most cases we only access a small range so
* it is faster to use a static buffer there
* BUT it offers also the possibility to have
* pointers read without the need to free them
* explicitley before returning. */
memset(&cbuf, 0, sizeof(cbuf));
value_ptr = cbuf;
}
fpos = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, displacement+offset_val, SEEK_SET);
fgot = php_stream_tell(ImageInfo->infile);
if (fgot!=displacement+offset_val) {
EFREE_IF(outside);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Wrong file pointer: 0x%08X != 0x%08X", fgot, displacement+offset_val);
return FALSE;
}
fgot = php_stream_read(ImageInfo->infile, value_ptr, byte_count);
php_stream_seek(ImageInfo->infile, fpos, SEEK_SET);
if (fgot<byte_count) {
EFREE_IF(outside);
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
} else {
/* 4 bytes or less and value is in the dir entry itself */
value_ptr = dir_entry+8;
offset_val= value_ptr-offset_base;
}
ImageInfo->sections_found |= FOUND_ANY_TAG;
#ifdef EXIF_DEBUG
dump_data = exif_dump_data(&dump_free, format, components, length, ImageInfo->motorola_intel, value_ptr TSRMLS_CC);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process tag(x%04X=%s,@x%04X + x%04X(=%d)): %s%s %s", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val+displacement, byte_count, byte_count, (components>1)&&format!=TAG_FMT_UNDEFINED&&format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(format), dump_data);
if (dump_free) {
efree(dump_data);
}
#endif
if (section_index==SECTION_THUMBNAIL) {
if (!ImageInfo->Thumbnail.data) {
switch(tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Thumbnail.width = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Thumbnail.height = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_OFFSETS:
case TAG_JPEG_INTERCHANGE_FORMAT:
/* accept both formats */
ImageInfo->Thumbnail.offset = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_BYTE_COUNTS:
if (ImageInfo->FileType == IMAGE_FILETYPE_TIFF_II || ImageInfo->FileType == IMAGE_FILETYPE_TIFF_MM) {
ImageInfo->Thumbnail.filetype = ImageInfo->FileType;
} else {
/* motorola is easier to read */
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_TIFF_MM;
}
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_JPEG_INTERCHANGE_FORMAT_LEN:
if (ImageInfo->Thumbnail.filetype == IMAGE_FILETYPE_UNKNOWN) {
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_JPEG;
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
}
break;
}
}
} else {
if (section_index==SECTION_IFD0 || section_index==SECTION_EXIF)
switch(tag) {
case TAG_COPYRIGHT:
/* check for "<photographer> NUL <editor> NUL" */
if (byte_count>1 && (length=php_strnlen(value_ptr, byte_count)) > 0) {
if (length<byte_count-1) {
/* When there are any characters after the first NUL */
ImageInfo->CopyrightPhotographer = estrdup(value_ptr);
ImageInfo->CopyrightEditor = estrndup(value_ptr+length+1, byte_count-length-1);
spprintf(&ImageInfo->Copyright, 0, "%s, %s", ImageInfo->CopyrightPhotographer, ImageInfo->CopyrightEditor);
/* format = TAG_FMT_UNDEFINED; this musn't be ASCII */
/* but we are not supposed to change this */
/* keep in mind that image_info does not store editor value */
} else {
ImageInfo->Copyright = estrndup(value_ptr, byte_count);
}
}
break;
case TAG_USERCOMMENT:
ImageInfo->UserCommentLength = exif_process_user_comment(ImageInfo, &(ImageInfo->UserComment), &(ImageInfo->UserCommentEncoding), value_ptr, byte_count TSRMLS_CC);
break;
case TAG_XP_TITLE:
case TAG_XP_COMMENTS:
case TAG_XP_AUTHOR:
case TAG_XP_KEYWORDS:
case TAG_XP_SUBJECT:
tmp_xp = (xp_field_type*)safe_erealloc(ImageInfo->xp_fields.list, (ImageInfo->xp_fields.count+1), sizeof(xp_field_type), 0);
ImageInfo->sections_found |= FOUND_WINXP;
ImageInfo->xp_fields.list = tmp_xp;
ImageInfo->xp_fields.count++;
exif_process_unicode(ImageInfo, &(ImageInfo->xp_fields.list[ImageInfo->xp_fields.count-1]), tag, value_ptr, byte_count TSRMLS_CC);
break;
case TAG_FNUMBER:
/* Simplest way of expressing aperture, so I trust it the most.
(overwrite previously computed value if there is one) */
ImageInfo->ApertureFNumber = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_APERTURE:
case TAG_MAX_APERTURE:
/* More relevant info always comes earlier, so only use this field if we don't
have appropriate aperture information yet. */
if (ImageInfo->ApertureFNumber == 0) {
ImageInfo->ApertureFNumber
= (float)exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)*0.5);
}
break;
case TAG_SHUTTERSPEED:
/* More complicated way of expressing exposure time, so only use
this value if we don't already have it from somewhere else.
SHUTTERSPEED comes after EXPOSURE TIME
*/
if (ImageInfo->ExposureTime == 0) {
ImageInfo->ExposureTime
= (float)(1/exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)));
}
break;
case TAG_EXPOSURETIME:
ImageInfo->ExposureTime = -1;
break;
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->ExifImageWidth = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_X_RES:
ImageInfo->FocalplaneXRes = exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_SUBJECT_DISTANCE:
/* Inidcates the distacne the autofocus camera is focused to.
Tends to be less accurate as distance increases. */
ImageInfo->Distance = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_RESOLUTION_UNIT:
switch((int)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)) {
case 1: ImageInfo->FocalplaneUnits = 25.4; break; /* inch */
case 2:
/* According to the information I was using, 2 measn meters.
But looking at the Cannon powershot's files, inches is the only
sensible value. */
ImageInfo->FocalplaneUnits = 25.4;
break;
case 3: ImageInfo->FocalplaneUnits = 10; break; /* centimeter */
case 4: ImageInfo->FocalplaneUnits = 1; break; /* milimeter */
case 5: ImageInfo->FocalplaneUnits = .001; break; /* micrometer */
}
break;
case TAG_SUB_IFD:
if (format==TAG_FMT_IFD) {
/* If this is called we are either in a TIFFs thumbnail or a JPEG where we cannot handle it */
/* TIFF thumbnail: our data structure cannot store a thumbnail of a thumbnail */
/* JPEG do we have the data area and what to do with it */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Skip SUB IFD");
}
break;
case TAG_MAKE:
ImageInfo->make = estrndup(value_ptr, byte_count);
break;
case TAG_MODEL:
ImageInfo->model = estrndup(value_ptr, byte_count);
break;
case TAG_MAKER_NOTE:
if (!exif_process_IFD_in_MAKERNOTE(ImageInfo, value_ptr, byte_count, offset_base, IFDlength, displacement TSRMLS_CC)) {
EFREE_IF(outside);
return FALSE;
}
break;
case TAG_EXIF_IFD_POINTER:
case TAG_GPS_IFD_POINTER:
case TAG_INTEROP_IFD_POINTER:
if (ReadNextIFD) {
char *Subdir_start;
int sub_section_index = 0;
switch(tag) {
case TAG_EXIF_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found EXIF");
#endif
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found GPS");
#endif
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found INTEROPERABILITY");
#endif
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
}
Subdir_start = offset_base + php_ifd_get32u(value_ptr, ImageInfo->motorola_intel);
if (Subdir_start < offset_base || Subdir_start > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD Pointer");
return FALSE;
}
if (!exif_process_IFD_in_JPEG(ImageInfo, Subdir_start, offset_base, IFDlength, displacement, sub_section_index TSRMLS_CC)) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(sub_section_index));
#endif
}
}
}
exif_iif_add_tag(ImageInfo, section_index, exif_get_tagname(tag, tagname, sizeof(tagname), tag_table TSRMLS_CC), tag, format, components, value_ptr TSRMLS_CC);
EFREE_IF(outside);
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_in_JPEG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC)
{
int de;
int NumDirEntries;
int NextDirOffset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s (x%04X(=%d))", exif_get_sectionname(section_index), IFDlength, IFDlength);
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if ((dir_start + 2) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
if ((dir_start+2+NumDirEntries*12) > (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: x%04X + 2 + x%04X*12 = x%04X > x%04X", (int)((size_t)dir_start+2-(size_t)offset_base), NumDirEntries, (int)((size_t)dir_start+2+NumDirEntries*12-(size_t)offset_base), IFDlength);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 1, exif_get_tag_table(section_index) TSRMLS_CC)) {
return FALSE;
}
}
/*
* Ignore IFD2 if it purportedly exists
*/
if (section_index == SECTION_THUMBNAIL) {
return TRUE;
}
/*
* Hack to make it process IDF1 I hope
* There are 2 IDFs, the second one holds the keys (0x0201 and 0x0202) to the thumbnail
*/
if ((dir_start+2+12*de + 4) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);
if (NextDirOffset) {
/* the next line seems false but here IFDlength means length of all IFDs */
if (offset_base + NextDirOffset < offset_base || offset_base + NextDirOffset > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD offset");
return FALSE;
}
/* That is the IFD for the first thumbnail */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Expect next IFD to be thumbnail");
#endif
if (exif_process_IFD_in_JPEG(ImageInfo, offset_base + NextDirOffset, offset_base, IFDlength, displacement, SECTION_THUMBNAIL TSRMLS_CC)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail size: 0x%04X", ImageInfo->Thumbnail.size);
#endif
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
exif_thumbnail_extract(ImageInfo, offset_base, IFDlength TSRMLS_CC);
}
return TRUE;
} else {
return FALSE;
}
}
return TRUE;
}
/* }}} */
/* {{{ exif_process_TIFF_in_JPEG
Process a TIFF header in a JPEG file
*/
static void exif_process_TIFF_in_JPEG(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
unsigned exif_value_2a, offset_of_ifd;
/* set the thumbnail stuff to nothing so we can test to see if they get set up */
if (memcmp(CharBuf, "II", 2) == 0) {
ImageInfo->motorola_intel = 0;
} else if (memcmp(CharBuf, "MM", 2) == 0) {
ImageInfo->motorola_intel = 1;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF alignment marker");
return;
}
/* Check the next two values for correctness. */
if (length < 8) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
exif_value_2a = php_ifd_get16u(CharBuf+2, ImageInfo->motorola_intel);
offset_of_ifd = php_ifd_get32u(CharBuf+4, ImageInfo->motorola_intel);
if (exif_value_2a != 0x2a || offset_of_ifd < 0x08) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
if (offset_of_ifd > length) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid IFD start");
return;
}
ImageInfo->sections_found |= FOUND_IFD0;
/* First directory starts at offset 8. Offsets starts at 0. */
exif_process_IFD_in_JPEG(ImageInfo, CharBuf+offset_of_ifd, CharBuf, length/*-14*/, displacement, SECTION_IFD0 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process TIFF in JPEG done");
#endif
/* Compute the CCD width, in milimeters. */
if (ImageInfo->FocalplaneXRes != 0) {
ImageInfo->CCDWidth = (float)(ImageInfo->ExifImageWidth * ImageInfo->FocalplaneUnits / ImageInfo->FocalplaneXRes);
}
}
/* }}} */
/* {{{ exif_process_APP1
Process an JPEG APP1 block marker
Describes all the drivel that most digital cameras include...
*/
static void exif_process_APP1(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
/* Check the APP1 for Exif Identifier Code */
static const uchar ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00};
if (length <= 8 || memcmp(CharBuf+2, ExifHeader, 6)) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Incorrect APP1 Exif Identifier Code");
return;
}
exif_process_TIFF_in_JPEG(ImageInfo, CharBuf + 8, length - 8, displacement+8 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process APP1/EXIF done");
#endif
}
/* }}} */
/* {{{ exif_process_APP12
Process an JPEG APP12 block marker used by OLYMPUS
*/
static void exif_process_APP12(image_info_type *ImageInfo, char *buffer, size_t length TSRMLS_DC)
{
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company", TAG_NONE, TAG_FMT_STRING, l1, buffer+2 TSRMLS_CC);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1-1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info", TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1 TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section APP12 with l1=%d, l2=%d done", l1, l2);
#endif
}
/* }}} */
/* {{{ exif_scan_JPEG_header
* Parse the marker stream until SOS or EOI is seen; */
static int exif_scan_JPEG_header(image_info_type *ImageInfo TSRMLS_DC)
{
int section, sn;
int marker = 0, last_marker = M_PSEUDO, comment_correction=1;
unsigned int ll, lh;
uchar *Data;
size_t fpos, size, got, itemlen;
jpeg_sof_info sof_info;
for(section=0;;section++) {
#ifdef EXIF_DEBUG
fpos = php_stream_tell(ImageInfo->infile);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Needing section %d @ 0x%08X", ImageInfo->file.count, fpos);
#endif
/* get marker byte, swallowing possible padding */
/* some software does not count the length bytes of COM section */
/* one company doing so is very much envolved in JPEG... so we accept too */
if (last_marker==M_COM && comment_correction) {
comment_correction = 2;
}
do {
if ((marker = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if (last_marker==M_COM && comment_correction>0) {
if (marker!=0xFF) {
marker = 0xff;
comment_correction--;
} else {
last_marker = M_PSEUDO; /* stop skipping 0 for M_COM */
}
}
} while (marker == 0xff);
if (last_marker==M_COM && !comment_correction) {
exif_error_docref("exif_read_data#error_mcom" EXIFERR_CC, ImageInfo, E_NOTICE, "Image has corrupt COM section: some software set wrong length information");
}
if (last_marker==M_COM && comment_correction)
return M_EOI; /* ah illegal: char after COM section not 0xFF */
fpos = php_stream_tell(ImageInfo->infile);
if (marker == 0xff) {
/* 0xff is legal padding, but if we get that many, something's wrong. */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "To many padding bytes");
return FALSE;
}
/* Read the length of the section. */
if ((lh = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if ((ll = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
itemlen = (lh << 8) | ll;
if (itemlen < 2) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s, Section length: 0x%02X%02X", EXIF_ERROR_CORRUPT, lh, ll);
#else
EXIF_ERRLOG_CORRUPT(ImageInfo)
#endif
return FALSE;
}
sn = exif_file_sections_add(ImageInfo, marker, itemlen+1, NULL);
Data = ImageInfo->file.list[sn].data;
/* Store first two pre-read bytes. */
Data[0] = (uchar)lh;
Data[1] = (uchar)ll;
got = php_stream_read(ImageInfo->infile, (char*)(Data+2), itemlen-2); /* Read the whole section. */
if (got != itemlen-2) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error reading from file: got=x%04X(=%d) != itemlen-2=x%04X(=%d)", got, got, itemlen-2, itemlen-2);
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section(x%02X=%s) @ x%04X + x%04X(=%d)", marker, exif_get_markername(marker), fpos, itemlen, itemlen);
#endif
switch(marker) {
case M_SOS: /* stop before hitting compressed data */
/* If reading entire image is requested, read the rest of the data. */
if (ImageInfo->read_all) {
/* Determine how much file is left. */
fpos = php_stream_tell(ImageInfo->infile);
size = ImageInfo->FileSize - fpos;
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, size, NULL);
Data = ImageInfo->file.list[sn].data;
got = php_stream_read(ImageInfo->infile, (char*)Data, size);
if (got != size) {
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
return TRUE;
case M_EOI: /* in case it's a tables-only JPEG stream */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "No image in jpeg!");
return (ImageInfo->sections_found&(~FOUND_COMPUTED)) ? TRUE : FALSE;
case M_COM: /* Comment section */
exif_process_COM(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_EXIF:
if (!(ImageInfo->sections_found&FOUND_IFD0)) {
/*ImageInfo->sections_found |= FOUND_EXIF;*/
/* Seen files from some 'U-lead' software with Vivitar scanner
that uses marker 31 later in the file (no clue what for!) */
exif_process_APP1(ImageInfo, (char *)Data, itemlen, fpos TSRMLS_CC);
}
break;
case M_APP12:
exif_process_APP12(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
if ((itemlen - 2) < 6) {
return FALSE;
}
exif_process_SOFn(Data, marker, &sof_info);
ImageInfo->Width = sof_info.width;
ImageInfo->Height = sof_info.height;
if (sof_info.num_components == 3) {
ImageInfo->IsColor = 1;
} else {
ImageInfo->IsColor = 0;
}
break;
default:
/* skip any other marker silently. */
break;
}
/* keep track of last marker */
last_marker = marker;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Done");
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_scan_thumbnail
* scan JPEG in thumbnail (memory) */
static int exif_scan_thumbnail(image_info_type *ImageInfo TSRMLS_DC)
{
uchar c, *data = (uchar*)ImageInfo->Thumbnail.data;
int n, marker;
size_t length=2, pos=0;
jpeg_sof_info sof_info;
if (!data) {
return FALSE; /* nothing to do here */
}
if (memcmp(data, "\xFF\xD8\xFF", 3)) {
if (!ImageInfo->Thumbnail.width && !ImageInfo->Thumbnail.height) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Thumbnail is not a JPEG image");
}
return FALSE;
}
for (;;) {
pos += length;
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
c = data[pos++];
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
if (c != 0xFF) {
return FALSE;
}
n = 8;
while ((c = data[pos++]) == 0xFF && n--) {
if (pos+3>=ImageInfo->Thumbnail.size)
return FALSE;
/* +3 = pos++ of next check when reaching marker + 2 bytes for length */
}
if (c == 0xFF)
return FALSE;
marker = c;
length = php_jpg_get16(data+pos);
if (pos+length>=ImageInfo->Thumbnail.size) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process section(x%02X=%s) @ x%04X + x%04X", marker, exif_get_markername(marker), pos, length);
#endif
switch (marker) {
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
/* handle SOFn block */
exif_process_SOFn(data+pos, marker, &sof_info);
ImageInfo->Thumbnail.height = sof_info.height;
ImageInfo->Thumbnail.width = sof_info.width;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size: %d * %d", sof_info.width, sof_info.height);
#endif
return TRUE;
case M_SOS:
case M_EOI:
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
break;
default:
/* just skip */
break;
}
}
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
}
/* }}} */
/* {{{ exif_process_IFD_in_TIFF
* Parse the TIFF header; */
static int exif_process_IFD_in_TIFF(image_info_type *ImageInfo, size_t dir_offset, int section_index TSRMLS_DC)
{
int i, sn, num_entries, sub_section_index = 0;
unsigned char *dir_entry;
char tagname[64];
size_t ifd_size, dir_size, entry_offset, next_offset, entry_length, entry_value=0, fgot;
int entry_tag , entry_type;
tag_table_type tag_table = exif_get_tag_table(section_index);
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
return FALSE;
}
if (ImageInfo->FileSize >= dir_offset+2) {
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, 2, NULL);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, 2);
#endif
php_stream_seek(ImageInfo->infile, dir_offset, SEEK_SET); /* we do not know the order of sections */
php_stream_read(ImageInfo->infile, (char*)ImageInfo->file.list[sn].data, 2);
num_entries = php_ifd_get16u(ImageInfo->file.list[sn].data, ImageInfo->motorola_intel);
dir_size = 2/*num dir entries*/ +12/*length of entry*/*num_entries +4/* offset to next ifd (points to thumbnail or NULL)*/;
if (ImageInfo->FileSize >= dir_offset+dir_size) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X), IFD entries(%d)", ImageInfo->FileSize, dir_offset+2, dir_size-2, num_entries);
#endif
if (exif_file_sections_realloc(ImageInfo, sn, dir_size TSRMLS_CC)) {
return FALSE;
}
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+2), dir_size-2);
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Dump: %s", exif_char_dump(ImageInfo->file.list[sn].data, dir_size, 0));*/
next_offset = php_ifd_get32u(ImageInfo->file.list[sn].data + dir_size - 4, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF done, next offset x%04X", next_offset);
#endif
/* now we have the directory we can look how long it should be */
ifd_size = dir_size;
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
if (entry_type > NUM_FORMATS) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: tag(0x%04X,%12s): Illegal format code 0x%04X, switching to BYTE", entry_tag, exif_get_tagname(entry_tag, tagname, -12, tag_table TSRMLS_CC), entry_type);
/* Since this is repeated in exif_process_IFD_TAG make it a notice here */
/* and make it a warning in the exif_process_IFD_TAG which is called */
/* elsewhere. */
entry_type = TAG_FMT_BYTE;
/*The next line would break the image on writeback: */
/* php_ifd_set16u(dir_entry+2, entry_type, ImageInfo->motorola_intel);*/
}
entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel) * php_tiff_bytes_per_format[entry_type];
if (entry_length <= 4) {
switch(entry_type) {
case TAG_FMT_USHORT:
entry_value = php_ifd_get16u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SSHORT:
entry_value = php_ifd_get16s(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_ULONG:
entry_value = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SLONG:
entry_value = php_ifd_get32s(dir_entry+8, ImageInfo->motorola_intel);
break;
}
switch(entry_tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Width = entry_value;
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Height = entry_value;
break;
case TAG_PHOTOMETRIC_INTERPRETATION:
switch (entry_value) {
case PMI_BLACK_IS_ZERO:
case PMI_WHITE_IS_ZERO:
case PMI_TRANSPARENCY_MASK:
ImageInfo->IsColor = 0;
break;
case PMI_RGB:
case PMI_PALETTE_COLOR:
case PMI_SEPARATED:
case PMI_YCBCR:
case PMI_CIELAB:
ImageInfo->IsColor = 1;
break;
}
break;
}
} else {
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* if entry needs expading ifd cache and entry is at end of current ifd cache. */
/* otherwise there may be huge holes between two entries */
if (entry_offset + entry_length > dir_offset + ifd_size
&& entry_offset == dir_offset + ifd_size) {
ifd_size = entry_offset + entry_length - dir_offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Resize struct: x%04X + x%04X - x%04X = x%04X", entry_offset, entry_length, dir_offset, ifd_size);
#endif
}
}
}
if (ImageInfo->FileSize >= dir_offset + ImageInfo->file.list[sn].size) {
if (ifd_size > dir_size) {
if (dir_offset + ifd_size > ImageInfo->FileSize) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
return FALSE;
}
if (exif_file_sections_realloc(ImageInfo, sn, ifd_size TSRMLS_CC)) {
return FALSE;
}
/* read values not stored in directory itself */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
#endif
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+dir_size), ifd_size-dir_size);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF, done");
#endif
}
/* now process the tags */
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
/*entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);*/
if (entry_tag == TAG_EXIF_IFD_POINTER ||
entry_tag == TAG_INTEROP_IFD_POINTER ||
entry_tag == TAG_GPS_IFD_POINTER ||
entry_tag == TAG_SUB_IFD
) {
switch(entry_tag) {
case TAG_EXIF_IFD_POINTER:
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
case TAG_SUB_IFD:
ImageInfo->sections_found |= FOUND_THUMBNAIL;
sub_section_index = SECTION_THUMBNAIL;
break;
}
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s @x%04X", exif_get_sectionname(sub_section_index), entry_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, entry_offset, sub_section_index TSRMLS_CC);
if (section_index!=SECTION_THUMBNAIL && entry_tag==TAG_SUB_IFD) {
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
efree(ImageInfo->Thumbnail.data);
ImageInfo->Thumbnail.data = NULL;
} else {
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s done", exif_get_sectionname(sub_section_index));
#endif
} else {
if (!exif_process_IFD_TAG(ImageInfo, (char*)dir_entry,
(char*)(ImageInfo->file.list[sn].data-dir_offset),
ifd_size, 0, section_index, 0, tag_table TSRMLS_CC)) {
return FALSE;
}
}
}
/* If we had a thumbnail in a SUB_IFD we have ANOTHER image in NEXT IFD */
if (next_offset && section_index != SECTION_THUMBNAIL) {
/* this should be a thumbnail IFD */
/* the thumbnail itself is stored at Tag=StripOffsets */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) at x%04X", next_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, next_offset, SECTION_THUMBNAIL TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data && ImageInfo->Thumbnail.offset && ImageInfo->Thumbnail.size && ImageInfo->read_thumbnail) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
efree(ImageInfo->Thumbnail.data);
ImageInfo->Thumbnail.data = NULL;
} else {
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) done");
#endif
}
return TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X)", ImageInfo->FileSize, dir_offset+ImageInfo->file.list[sn].size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+dir_size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than start of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+2);
return FALSE;
}
}
/* }}} */
/* {{{ exif_scan_FILE_header
* Parse the marker stream until SOS or EOI is seen; */
static int exif_scan_FILE_header(image_info_type *ImageInfo TSRMLS_DC)
{
unsigned char file_header[8];
int ret = FALSE;
ImageInfo->FileType = IMAGE_FILETYPE_UNKNOWN;
if (ImageInfo->FileSize >= 2) {
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
if (php_stream_read(ImageInfo->infile, (char*)file_header, 2) != 2) {
return FALSE;
}
if ((file_header[0]==0xff) && (file_header[1]==M_SOI)) {
ImageInfo->FileType = IMAGE_FILETYPE_JPEG;
if (exif_scan_JPEG_header(ImageInfo TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid JPEG file");
}
} else if (ImageInfo->FileSize >= 8) {
if (php_stream_read(ImageInfo->infile, (char*)(file_header+2), 6) != 6) {
return FALSE;
}
if (!memcmp(file_header, "II\x2A\x00", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_II;
ImageInfo->motorola_intel = 0;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/II format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else if (!memcmp(file_header, "MM\x00\x2a", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_MM;
ImageInfo->motorola_intel = 1;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/MM format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File not supported");
return FALSE;
}
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File too small (%d)", ImageInfo->FileSize);
}
return ret;
}
/* }}} */
/* {{{ exif_discard_imageinfo
Discard data scanned by exif_read_file.
*/
static int exif_discard_imageinfo(image_info_type *ImageInfo)
{
int i;
EFREE_IF(ImageInfo->FileName);
EFREE_IF(ImageInfo->UserComment);
EFREE_IF(ImageInfo->UserCommentEncoding);
EFREE_IF(ImageInfo->Copyright);
EFREE_IF(ImageInfo->CopyrightPhotographer);
EFREE_IF(ImageInfo->CopyrightEditor);
EFREE_IF(ImageInfo->Thumbnail.data);
EFREE_IF(ImageInfo->encode_unicode);
EFREE_IF(ImageInfo->decode_unicode_be);
EFREE_IF(ImageInfo->decode_unicode_le);
EFREE_IF(ImageInfo->encode_jis);
EFREE_IF(ImageInfo->decode_jis_be);
EFREE_IF(ImageInfo->decode_jis_le);
EFREE_IF(ImageInfo->make);
EFREE_IF(ImageInfo->model);
for (i=0; i<ImageInfo->xp_fields.count; i++) {
EFREE_IF(ImageInfo->xp_fields.list[i].value);
}
EFREE_IF(ImageInfo->xp_fields.list);
for (i=0; i<SECTION_COUNT; i++) {
exif_iif_free(ImageInfo, i);
}
exif_file_sections_free(ImageInfo);
memset(ImageInfo, 0, sizeof(*ImageInfo));
return TRUE;
}
/* }}} */
/* {{{ exif_read_file
*/
static int exif_read_file(image_info_type *ImageInfo, char *FileName, int read_thumbnail, int read_all TSRMLS_DC)
{
int ret;
struct stat st;
/* Start with an empty image information structure. */
memset(ImageInfo, 0, sizeof(*ImageInfo));
ImageInfo->motorola_intel = -1; /* flag as unknown */
ImageInfo->infile = php_stream_open_wrapper(FileName, "rb", STREAM_MUST_SEEK|IGNORE_PATH, NULL);
if (!ImageInfo->infile) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Unable to open file");
return FALSE;
}
if (php_stream_is(ImageInfo->infile, PHP_STREAM_IS_STDIO)) {
if (VCWD_STAT(FileName, &st) >= 0) {
if ((st.st_mode & S_IFMT) != S_IFREG) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Not a file");
php_stream_close(ImageInfo->infile);
return FALSE;
}
/* Store file date/time. */
ImageInfo->FileDateTime = st.st_mtime;
ImageInfo->FileSize = st.st_size;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Opened stream is file: %d", ImageInfo->FileSize);*/
}
} else {
if (!ImageInfo->FileSize) {
php_stream_seek(ImageInfo->infile, 0, SEEK_END);
ImageInfo->FileSize = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
}
}
php_basename(FileName, strlen(FileName), NULL, 0, &(ImageInfo->FileName), NULL TSRMLS_CC);
ImageInfo->read_thumbnail = read_thumbnail;
ImageInfo->read_all = read_all;
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_UNKNOWN;
ImageInfo->encode_unicode = safe_estrdup(EXIF_G(encode_unicode));
ImageInfo->decode_unicode_be = safe_estrdup(EXIF_G(decode_unicode_be));
ImageInfo->decode_unicode_le = safe_estrdup(EXIF_G(decode_unicode_le));
ImageInfo->encode_jis = safe_estrdup(EXIF_G(encode_jis));
ImageInfo->decode_jis_be = safe_estrdup(EXIF_G(decode_jis_be));
ImageInfo->decode_jis_le = safe_estrdup(EXIF_G(decode_jis_le));
ImageInfo->ifd_nesting_level = 0;
/* Scan the JPEG headers. */
ret = exif_scan_FILE_header(ImageInfo TSRMLS_CC);
php_stream_close(ImageInfo->infile);
return ret;
}
/* }}} */
/* {{{ proto array exif_read_data(string filename [, sections_needed [, sub_arrays[, read_thumbnail]]])
Reads header data from the JPEG/TIFF image filename and optionally reads the internal thumbnails */
PHP_FUNCTION(exif_read_data)
{
char *p_name, *p_sections_needed = NULL;
int p_name_len, p_sections_needed_len = 0;
zend_bool sub_arrays=0, read_thumbnail=0, read_all=0;
int i, ret, sections_needed=0;
image_info_type ImageInfo;
char tmp[64], *sections_str, *s;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbb", &p_name, &p_name_len, &p_sections_needed, &p_sections_needed_len, &sub_arrays, &read_thumbnail) == FAILURE) {
return;
}
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (p_sections_needed) {
spprintf(§ions_str, 0, ",%s,", p_sections_needed);
/* sections_str DOES start with , and SPACES are NOT allowed in names */
s = sections_str;
while (*++s) {
if (*s == ' ') {
*s = ',';
}
}
for (i = 0; i < SECTION_COUNT; i++) {
snprintf(tmp, sizeof(tmp), ",%s,", exif_get_sectionname(i));
if (strstr(sections_str, tmp)) {
sections_needed |= 1<<i;
}
}
EFREE_IF(sections_str);
/* now see what we need */
#ifdef EXIF_DEBUG
sections_str = exif_get_sectionlist(sections_needed TSRMLS_CC);
if (!sections_str) {
RETURN_FALSE;
}
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections needed: %s", sections_str[0] ? sections_str : "None");
EFREE_IF(sections_str);
#endif
}
ret = exif_read_file(&ImageInfo, p_name, read_thumbnail, read_all TSRMLS_CC);
sections_str = exif_get_sectionlist(ImageInfo.sections_found TSRMLS_CC);
#ifdef EXIF_DEBUG
if (sections_str)
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections found: %s", sections_str[0] ? sections_str : "None");
#endif
ImageInfo.sections_found |= FOUND_COMPUTED|FOUND_FILE;/* do not inform about in debug*/
if (ret == FALSE || (sections_needed && !(sections_needed&ImageInfo.sections_found))) {
/* array_init must be checked at last! otherwise the array must be freed if a later test fails. */
exif_discard_imageinfo(&ImageInfo);
EFREE_IF(sections_str);
RETURN_FALSE;
}
array_init(return_value);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section FILE");
#endif
/* now we can add our information */
exif_iif_add_str(&ImageInfo, SECTION_FILE, "FileName", ImageInfo.FileName TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileDateTime", ImageInfo.FileDateTime TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileSize", ImageInfo.FileSize TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileType", ImageInfo.FileType TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "MimeType", (char*)php_image_type_to_mime_type(ImageInfo.FileType) TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "SectionsFound", sections_str ? sections_str : "NONE" TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section COMPUTED");
#endif
if (ImageInfo.Width>0 && ImageInfo.Height>0) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "html" TSRMLS_CC, "width=\"%d\" height=\"%d\"", ImageInfo.Width, ImageInfo.Height);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Height", ImageInfo.Height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Width", ImageInfo.Width TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "IsColor", ImageInfo.IsColor TSRMLS_CC);
if (ImageInfo.motorola_intel != -1) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "ByteOrderMotorola", ImageInfo.motorola_intel TSRMLS_CC);
}
if (ImageInfo.FocalLength) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocalLength" TSRMLS_CC, "%4.1Fmm", ImageInfo.FocalLength);
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "35mmFocalLength" TSRMLS_CC, "%dmm", (int)(ImageInfo.FocalLength/ImageInfo.CCDWidth*35+0.5));
}
}
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "CCDWidth" TSRMLS_CC, "%dmm", (int)ImageInfo.CCDWidth);
}
if(ImageInfo.ExposureTime>0) {
if(ImageInfo.ExposureTime <= 0.5) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s (1/%d)", ImageInfo.ExposureTime, (int)(0.5 + 1/ImageInfo.ExposureTime));
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s", ImageInfo.ExposureTime);
}
}
if(ImageInfo.ApertureFNumber) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ApertureFNumber" TSRMLS_CC, "f/%.1F", ImageInfo.ApertureFNumber);
}
if(ImageInfo.Distance) {
if(ImageInfo.Distance<0) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "FocusDistance", "Infinite" TSRMLS_CC);
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocusDistance" TSRMLS_CC, "%0.2Fm", ImageInfo.Distance);
}
}
if (ImageInfo.UserComment) {
exif_iif_add_buffer(&ImageInfo, SECTION_COMPUTED, "UserComment", ImageInfo.UserCommentLength, ImageInfo.UserComment TSRMLS_CC);
if (ImageInfo.UserCommentEncoding && strlen(ImageInfo.UserCommentEncoding)) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "UserCommentEncoding", ImageInfo.UserCommentEncoding TSRMLS_CC);
}
}
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright", ImageInfo.Copyright TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Photographer", ImageInfo.CopyrightPhotographer TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Editor", ImageInfo.CopyrightEditor TSRMLS_CC);
for (i=0; i<ImageInfo.xp_fields.count; i++) {
exif_iif_add_str(&ImageInfo, SECTION_WINXP, exif_get_tagname(ImageInfo.xp_fields.list[i].tag, NULL, 0, exif_get_tag_table(SECTION_WINXP) TSRMLS_CC), ImageInfo.xp_fields.list[i].value TSRMLS_CC);
}
if (ImageInfo.Thumbnail.size) {
if (read_thumbnail) {
/* not exif_iif_add_str : this is a buffer */
exif_iif_add_tag(&ImageInfo, SECTION_THUMBNAIL, "THUMBNAIL", TAG_NONE, TAG_FMT_UNDEFINED, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.data TSRMLS_CC);
}
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
/* try to evaluate if thumbnail data is present */
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.FileType", ImageInfo.Thumbnail.filetype TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Thumbnail.MimeType", (char*)php_image_type_to_mime_type(ImageInfo.Thumbnail.filetype) TSRMLS_CC);
}
if (ImageInfo.Thumbnail.width && ImageInfo.Thumbnail.height) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Height", ImageInfo.Thumbnail.height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Width", ImageInfo.Thumbnail.width TSRMLS_CC);
}
EFREE_IF(sections_str);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Adding image infos");
#endif
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FILE TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMPUTED TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_ANY_TAG TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_IFD0 TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_THUMBNAIL TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMMENT TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_EXIF TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_GPS TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_INTEROP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FPIX TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_APP12 TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_WINXP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_MAKERNOTE TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, Z_STRVAL_PP(p_name), E_NOTICE, "done");
#endif
}
/* }}} */
/* {{{ proto string exif_thumbnail(string filename [, &width, &height [, &imagetype]])
Reads the embedded thumbnail */
PHP_FUNCTION(exif_thumbnail)
{
zval *p_width = 0, *p_height = 0, *p_imagetype = 0;
char *p_name;
int p_name_len, ret, arg_c = ZEND_NUM_ARGS();
image_info_type ImageInfo;
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (arg_c!=1 && arg_c!=3 && arg_c!=4) {
WRONG_PARAM_COUNT;
}
if (zend_parse_parameters(arg_c TSRMLS_CC, "p|z/z/z/", &p_name, &p_name_len, &p_width, &p_height, &p_imagetype) == FAILURE) {
return;
}
ret = exif_read_file(&ImageInfo, p_name, 1, 0 TSRMLS_CC);
if (ret==FALSE) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Thumbnail data %d %d %d, %d x %d", ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.filetype, ImageInfo.Thumbnail.width, ImageInfo.Thumbnail.height);
#endif
if (!ImageInfo.Thumbnail.data || !ImageInfo.Thumbnail.size) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Returning thumbnail(%d)", ImageInfo.Thumbnail.size);
#endif
ZVAL_STRINGL(return_value, ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, 1);
if (arg_c >= 3) {
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
zval_dtor(p_width);
zval_dtor(p_height);
ZVAL_LONG(p_width, ImageInfo.Thumbnail.width);
ZVAL_LONG(p_height, ImageInfo.Thumbnail.height);
}
if (arg_c >= 4) {
zval_dtor(p_imagetype);
ZVAL_LONG(p_imagetype, ImageInfo.Thumbnail.filetype);
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, p_name, E_NOTICE, "Done");
#endif
}
/* }}} */
/* {{{ proto int exif_imagetype(string imagefile)
Get the type of an image */
PHP_FUNCTION(exif_imagetype)
{
char *imagefile;
int imagefile_len;
php_stream * stream;
int itype = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &imagefile, &imagefile_len) == FAILURE) {
return;
}
stream = php_stream_open_wrapper(imagefile, "rb", IGNORE_PATH|REPORT_ERRORS, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
itype = php_getimagetype(stream, NULL TSRMLS_CC);
php_stream_close(stream);
if (itype == IMAGE_FILETYPE_UNKNOWN) {
RETURN_FALSE;
} else {
ZVAL_LONG(return_value, itype);
}
}
/* }}} */
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 tw=78 fdm=marker
* vim<600: sw=4 ts=4 tw=78
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_4822_0 |
crossvul-cpp_data_good_5505_0 | /* inftrees.c -- generate Huffman trees for efficient decoding
* Copyright (C) 1995-2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zutil.h"
#include "inftrees.h"
#define MAXBITS 15
const char inflate_copyright[] =
" inflate 1.2.8.1 Copyright 1995-2013 Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
include such an acknowledgment, I would appreciate that you keep this
copyright string in the executable of your product.
*/
/*
Build a set of tables to decode the provided canonical Huffman code.
The code lengths are lens[0..codes-1]. The result starts at *table,
whose indices are 0..2^bits-1. work is a writable array of at least
lens shorts, which is used as a work area. type is the type of code
to be generated, CODES, LENS, or DISTS. On return, zero is success,
-1 is an invalid code, and +1 means that ENOUGH isn't enough. table
on return points to the next available entry's address. bits is the
requested root table index bits, and on return it is the actual root
table index bits. It will differ if the request is greater than the
longest code or if it is less than the shortest code.
*/
int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work)
codetype type;
unsigned short FAR *lens;
unsigned codes;
code FAR * FAR *table;
unsigned FAR *bits;
unsigned short FAR *work;
{
unsigned len; /* a code's length in bits */
unsigned sym; /* index of code symbols */
unsigned min, max; /* minimum and maximum code lengths */
unsigned root; /* number of index bits for root table */
unsigned curr; /* number of index bits for current table */
unsigned drop; /* code bits to drop for sub-table */
int left; /* number of prefix codes available */
unsigned used; /* code entries in table used */
unsigned huff; /* Huffman code */
unsigned incr; /* for incrementing code, index */
unsigned fill; /* index for replicating entries */
unsigned low; /* low bits for current root entry */
unsigned mask; /* mask for low root bits */
code here; /* table entry for duplication */
code FAR *next; /* next available space in table */
const unsigned short FAR *base; /* base value table to use */
const unsigned short FAR *extra; /* extra bits table to use */
unsigned match; /* use base and extra for symbol >= match */
unsigned short count[MAXBITS+1]; /* number of codes of each length */
unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
static const unsigned short lbase[31] = { /* Length codes 257..285 base */
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const unsigned short lext[31] = { /* Length codes 257..285 extra */
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 203, 198};
static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577, 0, 0};
static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
28, 28, 29, 29, 64, 64};
/*
Process a set of code lengths to create a canonical Huffman code. The
code lengths are lens[0..codes-1]. Each length corresponds to the
symbols 0..codes-1. The Huffman code is generated by first sorting the
symbols by length from short to long, and retaining the symbol order
for codes with equal lengths. Then the code starts with all zero bits
for the first code of the shortest length, and the codes are integer
increments for the same length, and zeros are appended as the length
increases. For the deflate format, these bits are stored backwards
from their more natural integer increment ordering, and so when the
decoding tables are built in the large loop below, the integer codes
are incremented backwards.
This routine assumes, but does not check, that all of the entries in
lens[] are in the range 0..MAXBITS. The caller must assure this.
1..MAXBITS is interpreted as that code length. zero means that that
symbol does not occur in this code.
The codes are sorted by computing a count of codes for each length,
creating from that a table of starting indices for each length in the
sorted table, and then entering the symbols in order in the sorted
table. The sorted table is work[], with that space being provided by
the caller.
The length counts are used for other purposes as well, i.e. finding
the minimum and maximum length codes, determining if there are any
codes at all, checking for a valid set of lengths, and looking ahead
at length counts to determine sub-table sizes when building the
decoding tables.
*/
/* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
for (len = 0; len <= MAXBITS; len++)
count[len] = 0;
for (sym = 0; sym < codes; sym++)
count[lens[sym]]++;
/* bound code lengths, force root to be within code lengths */
root = *bits;
for (max = MAXBITS; max >= 1; max--)
if (count[max] != 0) break;
if (root > max) root = max;
if (max == 0) { /* no symbols to code at all */
here.op = (unsigned char)64; /* invalid code marker */
here.bits = (unsigned char)1;
here.val = (unsigned short)0;
*(*table)++ = here; /* make a table to force an error */
*(*table)++ = here;
*bits = 1;
return 0; /* no symbols, but wait for decoding to report error */
}
for (min = 1; min < max; min++)
if (count[min] != 0) break;
if (root < min) root = min;
/* check for an over-subscribed or incomplete set of lengths */
left = 1;
for (len = 1; len <= MAXBITS; len++) {
left <<= 1;
left -= count[len];
if (left < 0) return -1; /* over-subscribed */
}
if (left > 0 && (type == CODES || max != 1))
return -1; /* incomplete set */
/* generate offsets into symbol table for each length for sorting */
offs[1] = 0;
for (len = 1; len < MAXBITS; len++)
offs[len + 1] = offs[len] + count[len];
/* sort symbols by length, by symbol order within each length */
for (sym = 0; sym < codes; sym++)
if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
/*
Create and fill in decoding tables. In this loop, the table being
filled is at next and has curr index bits. The code being used is huff
with length len. That code is converted to an index by dropping drop
bits off of the bottom. For codes where len is less than drop + curr,
those top drop + curr - len bits are incremented through all values to
fill the table with replicated entries.
root is the number of index bits for the root table. When len exceeds
root, sub-tables are created pointed to by the root entry with an index
of the low root bits of huff. This is saved in low to check for when a
new sub-table should be started. drop is zero when the root table is
being filled, and drop is root when sub-tables are being filled.
When a new sub-table is needed, it is necessary to look ahead in the
code lengths to determine what size sub-table is needed. The length
counts are used for this, and so count[] is decremented as codes are
entered in the tables.
used keeps track of how many table entries have been allocated from the
provided *table space. It is checked for LENS and DIST tables against
the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
the initial root table size constants. See the comments in inftrees.h
for more information.
sym increments through all symbols, and the loop terminates when
all codes of length max, i.e. all codes, have been processed. This
routine permits incomplete codes, so another loop after this one fills
in the rest of the decoding tables with invalid code markers.
*/
/* set up for code type */
switch (type) {
case CODES:
base = extra = work; /* dummy value--not used */
match = 20;
break;
case LENS:
base = lbase;
extra = lext;
match = 257;
break;
default: /* DISTS */
base = dbase;
extra = dext;
match = 0;
}
/* initialize state for loop */
huff = 0; /* starting code */
sym = 0; /* starting code symbol */
len = min; /* starting code length */
next = *table; /* current table to fill in */
curr = root; /* current table index bits */
drop = 0; /* current bits to drop from code for index */
low = (unsigned)(-1); /* trigger new sub-table when len > root */
used = 1U << root; /* use root table entries */
mask = used - 1; /* mask for comparing low */
/* check available table space */
if ((type == LENS && used > ENOUGH_LENS) ||
(type == DISTS && used > ENOUGH_DISTS))
return 1;
/* process all codes and make table entries */
for (;;) {
/* create table entry */
here.bits = (unsigned char)(len - drop);
if (work[sym] + 1 < match) {
here.op = (unsigned char)0;
here.val = work[sym];
}
else if (work[sym] >= match) {
here.op = (unsigned char)(extra[work[sym] - match]);
here.val = base[work[sym] - match];
}
else {
here.op = (unsigned char)(32 + 64); /* end of block */
here.val = 0;
}
/* replicate for those indices with low len bits equal to huff */
incr = 1U << (len - drop);
fill = 1U << curr;
min = fill; /* save offset to next table */
do {
fill -= incr;
next[(huff >> drop) + fill] = here;
} while (fill != 0);
/* backwards increment the len-bit code huff */
incr = 1U << (len - 1);
while (huff & incr)
incr >>= 1;
if (incr != 0) {
huff &= incr - 1;
huff += incr;
}
else
huff = 0;
/* go to next symbol, update count, len */
sym++;
if (--(count[len]) == 0) {
if (len == max) break;
len = lens[work[sym]];
}
/* create new sub-table if needed */
if (len > root && (huff & mask) != low) {
/* if first time, transition to sub-tables */
if (drop == 0)
drop = root;
/* increment past last table */
next += min; /* here min is 1 << curr */
/* determine length of next table */
curr = len - drop;
left = (int)(1 << curr);
while (curr + drop < max) {
left -= count[curr + drop];
if (left <= 0) break;
curr++;
left <<= 1;
}
/* check for enough space */
used += 1U << curr;
if ((type == LENS && used > ENOUGH_LENS) ||
(type == DISTS && used > ENOUGH_DISTS))
return 1;
/* point entry in root table to sub-table */
low = huff & mask;
(*table)[low].op = (unsigned char)curr;
(*table)[low].bits = (unsigned char)root;
(*table)[low].val = (unsigned short)(next - *table);
}
}
/* fill in remaining table entry if code is incomplete (guaranteed to have
at most one remaining entry, since if the code is incomplete, the
maximum code length that was allowed to get this far is one bit) */
if (huff != 0) {
here.op = (unsigned char)64; /* invalid code marker */
here.bits = (unsigned char)(len - drop);
here.val = (unsigned short)0;
next[huff] = here;
}
/* set return parameters */
*table += used;
*bits = root;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5505_0 |
crossvul-cpp_data_good_2124_0 | /*
* Linux Socket Filter - Kernel level socket filtering
*
* Based on the design of the Berkeley Packet Filter. The new
* internal format has been designed by PLUMgrid:
*
* Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
*
* Authors:
*
* Jay Schulist <jschlst@samba.org>
* Alexei Starovoitov <ast@plumgrid.com>
* Daniel Borkmann <dborkman@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Andi Kleen - Fix a few bad bugs and races.
* Kris Katterjohn - Added many additional checks in sk_chk_filter()
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
/* No hurry in this branch
*
* Exported for the bpf jit load helper.
*/
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
u8 *ptr = NULL;
if (k >= SKF_NET_OFF)
ptr = skb_network_header(skb) + k - SKF_NET_OFF;
else if (k >= SKF_LL_OFF)
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
return ptr;
return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
unsigned int size, void *buffer)
{
if (k >= 0)
return skb_header_pointer(skb, k, size, buffer);
return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
/**
* sk_filter - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
*
* Run the filter code and then cut skb->data to correct size returned by
* sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
* than pkt_len we keep whole skb->data. This is the socket level
* wrapper to sk_run_filter. It returns 0 if the packet should
* be accepted or -EPERM if the packet should be tossed.
*
*/
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
int err;
struct sk_filter *filter;
/*
* If the skb was allocated from pfmemalloc reserves, only
* allow SOCK_MEMALLOC sockets to use it as this socket is
* helping free memory
*/
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
return -ENOMEM;
err = security_sock_rcv_skb(sk, skb);
if (err)
return err;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(sk_filter);
/* Base function for offset calculation. Needs to go into .text section,
* therefore keeping it non-static as well; will also be used by JITs
* anyway later on, so do not let the compiler omit it.
*/
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
return 0;
}
/**
* __sk_run_filter - run a filter on a given context
* @ctx: buffer to run the filter on
* @insn: filter to apply
*
* Decode and apply filter instructions to the skb->data. Return length to
* keep, 0 for none. @ctx is the data we are operating on, @insn is the
* array of filter instructions.
*/
unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
{
u64 stack[MAX_BPF_STACK / sizeof(u64)];
u64 regs[MAX_BPF_REG], tmp;
void *ptr;
int off;
#define K insn->imm
#define A regs[insn->a_reg]
#define X regs[insn->x_reg]
#define R0 regs[0]
#define CONT ({insn++; goto select_insn; })
#define CONT_JMP ({insn++; goto select_insn; })
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C
DL(BPF_ALU, BPF_ADD, BPF_X),
DL(BPF_ALU, BPF_ADD, BPF_K),
DL(BPF_ALU, BPF_SUB, BPF_X),
DL(BPF_ALU, BPF_SUB, BPF_K),
DL(BPF_ALU, BPF_AND, BPF_X),
DL(BPF_ALU, BPF_AND, BPF_K),
DL(BPF_ALU, BPF_OR, BPF_X),
DL(BPF_ALU, BPF_OR, BPF_K),
DL(BPF_ALU, BPF_LSH, BPF_X),
DL(BPF_ALU, BPF_LSH, BPF_K),
DL(BPF_ALU, BPF_RSH, BPF_X),
DL(BPF_ALU, BPF_RSH, BPF_K),
DL(BPF_ALU, BPF_XOR, BPF_X),
DL(BPF_ALU, BPF_XOR, BPF_K),
DL(BPF_ALU, BPF_MUL, BPF_X),
DL(BPF_ALU, BPF_MUL, BPF_K),
DL(BPF_ALU, BPF_MOV, BPF_X),
DL(BPF_ALU, BPF_MOV, BPF_K),
DL(BPF_ALU, BPF_DIV, BPF_X),
DL(BPF_ALU, BPF_DIV, BPF_K),
DL(BPF_ALU, BPF_MOD, BPF_X),
DL(BPF_ALU, BPF_MOD, BPF_K),
DL(BPF_ALU, BPF_NEG, 0),
DL(BPF_ALU, BPF_END, BPF_TO_BE),
DL(BPF_ALU, BPF_END, BPF_TO_LE),
DL(BPF_ALU64, BPF_ADD, BPF_X),
DL(BPF_ALU64, BPF_ADD, BPF_K),
DL(BPF_ALU64, BPF_SUB, BPF_X),
DL(BPF_ALU64, BPF_SUB, BPF_K),
DL(BPF_ALU64, BPF_AND, BPF_X),
DL(BPF_ALU64, BPF_AND, BPF_K),
DL(BPF_ALU64, BPF_OR, BPF_X),
DL(BPF_ALU64, BPF_OR, BPF_K),
DL(BPF_ALU64, BPF_LSH, BPF_X),
DL(BPF_ALU64, BPF_LSH, BPF_K),
DL(BPF_ALU64, BPF_RSH, BPF_X),
DL(BPF_ALU64, BPF_RSH, BPF_K),
DL(BPF_ALU64, BPF_XOR, BPF_X),
DL(BPF_ALU64, BPF_XOR, BPF_K),
DL(BPF_ALU64, BPF_MUL, BPF_X),
DL(BPF_ALU64, BPF_MUL, BPF_K),
DL(BPF_ALU64, BPF_MOV, BPF_X),
DL(BPF_ALU64, BPF_MOV, BPF_K),
DL(BPF_ALU64, BPF_ARSH, BPF_X),
DL(BPF_ALU64, BPF_ARSH, BPF_K),
DL(BPF_ALU64, BPF_DIV, BPF_X),
DL(BPF_ALU64, BPF_DIV, BPF_K),
DL(BPF_ALU64, BPF_MOD, BPF_X),
DL(BPF_ALU64, BPF_MOD, BPF_K),
DL(BPF_ALU64, BPF_NEG, 0),
DL(BPF_JMP, BPF_CALL, 0),
DL(BPF_JMP, BPF_JA, 0),
DL(BPF_JMP, BPF_JEQ, BPF_X),
DL(BPF_JMP, BPF_JEQ, BPF_K),
DL(BPF_JMP, BPF_JNE, BPF_X),
DL(BPF_JMP, BPF_JNE, BPF_K),
DL(BPF_JMP, BPF_JGT, BPF_X),
DL(BPF_JMP, BPF_JGT, BPF_K),
DL(BPF_JMP, BPF_JGE, BPF_X),
DL(BPF_JMP, BPF_JGE, BPF_K),
DL(BPF_JMP, BPF_JSGT, BPF_X),
DL(BPF_JMP, BPF_JSGT, BPF_K),
DL(BPF_JMP, BPF_JSGE, BPF_X),
DL(BPF_JMP, BPF_JSGE, BPF_K),
DL(BPF_JMP, BPF_JSET, BPF_X),
DL(BPF_JMP, BPF_JSET, BPF_K),
DL(BPF_JMP, BPF_EXIT, 0),
DL(BPF_STX, BPF_MEM, BPF_B),
DL(BPF_STX, BPF_MEM, BPF_H),
DL(BPF_STX, BPF_MEM, BPF_W),
DL(BPF_STX, BPF_MEM, BPF_DW),
DL(BPF_STX, BPF_XADD, BPF_W),
DL(BPF_STX, BPF_XADD, BPF_DW),
DL(BPF_ST, BPF_MEM, BPF_B),
DL(BPF_ST, BPF_MEM, BPF_H),
DL(BPF_ST, BPF_MEM, BPF_W),
DL(BPF_ST, BPF_MEM, BPF_DW),
DL(BPF_LDX, BPF_MEM, BPF_B),
DL(BPF_LDX, BPF_MEM, BPF_H),
DL(BPF_LDX, BPF_MEM, BPF_W),
DL(BPF_LDX, BPF_MEM, BPF_DW),
DL(BPF_LD, BPF_ABS, BPF_W),
DL(BPF_LD, BPF_ABS, BPF_H),
DL(BPF_LD, BPF_ABS, BPF_B),
DL(BPF_LD, BPF_IND, BPF_W),
DL(BPF_LD, BPF_IND, BPF_H),
DL(BPF_LD, BPF_IND, BPF_B),
#undef DL
};
regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
regs[ARG1_REG] = (u64) (unsigned long) ctx;
select_insn:
goto *jumptable[insn->code];
/* ALU */
#define ALU(OPCODE, OP) \
BPF_ALU64_##OPCODE##_BPF_X: \
A = A OP X; \
CONT; \
BPF_ALU_##OPCODE##_BPF_X: \
A = (u32) A OP (u32) X; \
CONT; \
BPF_ALU64_##OPCODE##_BPF_K: \
A = A OP K; \
CONT; \
BPF_ALU_##OPCODE##_BPF_K: \
A = (u32) A OP (u32) K; \
CONT;
ALU(BPF_ADD, +)
ALU(BPF_SUB, -)
ALU(BPF_AND, &)
ALU(BPF_OR, |)
ALU(BPF_LSH, <<)
ALU(BPF_RSH, >>)
ALU(BPF_XOR, ^)
ALU(BPF_MUL, *)
#undef ALU
BPF_ALU_BPF_NEG_0:
A = (u32) -A;
CONT;
BPF_ALU64_BPF_NEG_0:
A = -A;
CONT;
BPF_ALU_BPF_MOV_BPF_X:
A = (u32) X;
CONT;
BPF_ALU_BPF_MOV_BPF_K:
A = (u32) K;
CONT;
BPF_ALU64_BPF_MOV_BPF_X:
A = X;
CONT;
BPF_ALU64_BPF_MOV_BPF_K:
A = K;
CONT;
BPF_ALU64_BPF_ARSH_BPF_X:
(*(s64 *) &A) >>= X;
CONT;
BPF_ALU64_BPF_ARSH_BPF_K:
(*(s64 *) &A) >>= K;
CONT;
BPF_ALU64_BPF_MOD_BPF_X:
if (unlikely(X == 0))
return 0;
tmp = A;
A = do_div(tmp, X);
CONT;
BPF_ALU_BPF_MOD_BPF_X:
if (unlikely(X == 0))
return 0;
tmp = (u32) A;
A = do_div(tmp, (u32) X);
CONT;
BPF_ALU64_BPF_MOD_BPF_K:
tmp = A;
A = do_div(tmp, K);
CONT;
BPF_ALU_BPF_MOD_BPF_K:
tmp = (u32) A;
A = do_div(tmp, (u32) K);
CONT;
BPF_ALU64_BPF_DIV_BPF_X:
if (unlikely(X == 0))
return 0;
do_div(A, X);
CONT;
BPF_ALU_BPF_DIV_BPF_X:
if (unlikely(X == 0))
return 0;
tmp = (u32) A;
do_div(tmp, (u32) X);
A = (u32) tmp;
CONT;
BPF_ALU64_BPF_DIV_BPF_K:
do_div(A, K);
CONT;
BPF_ALU_BPF_DIV_BPF_K:
tmp = (u32) A;
do_div(tmp, (u32) K);
A = (u32) tmp;
CONT;
BPF_ALU_BPF_END_BPF_TO_BE:
switch (K) {
case 16:
A = (__force u16) cpu_to_be16(A);
break;
case 32:
A = (__force u32) cpu_to_be32(A);
break;
case 64:
A = (__force u64) cpu_to_be64(A);
break;
}
CONT;
BPF_ALU_BPF_END_BPF_TO_LE:
switch (K) {
case 16:
A = (__force u16) cpu_to_le16(A);
break;
case 32:
A = (__force u32) cpu_to_le32(A);
break;
case 64:
A = (__force u64) cpu_to_le64(A);
break;
}
CONT;
/* CALL */
BPF_JMP_BPF_CALL_0:
/* Function call scratches R1-R5 registers, preserves R6-R9,
* and stores return value into R0.
*/
R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
regs[4], regs[5]);
CONT;
/* JMP */
BPF_JMP_BPF_JA_0:
insn += insn->off;
CONT;
BPF_JMP_BPF_JEQ_BPF_X:
if (A == X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JEQ_BPF_K:
if (A == K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JNE_BPF_X:
if (A != X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JNE_BPF_K:
if (A != K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGT_BPF_X:
if (A > X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGT_BPF_K:
if (A > K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGE_BPF_X:
if (A >= X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGE_BPF_K:
if (A >= K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGT_BPF_X:
if (((s64)A) > ((s64)X)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGT_BPF_K:
if (((s64)A) > ((s64)K)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGE_BPF_X:
if (((s64)A) >= ((s64)X)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGE_BPF_K:
if (((s64)A) >= ((s64)K)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSET_BPF_X:
if (A & X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSET_BPF_K:
if (A & K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_EXIT_0:
return R0;
/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE) \
BPF_STX_BPF_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (A + insn->off) = X; \
CONT; \
BPF_ST_BPF_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (A + insn->off) = K; \
CONT; \
BPF_LDX_BPF_MEM_##SIZEOP: \
A = *(SIZE *)(unsigned long) (X + insn->off); \
CONT;
LDST(BPF_B, u8)
LDST(BPF_H, u16)
LDST(BPF_W, u32)
LDST(BPF_DW, u64)
#undef LDST
BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
atomic_add((u32) X, (atomic_t *)(unsigned long)
(A + insn->off));
CONT;
BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
atomic64_add((u64) X, (atomic64_t *)(unsigned long)
(A + insn->off));
CONT;
BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
off = K;
load_word:
/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
* appearing in the programs where ctx == skb. All programs
* keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter()
* saves it in R6, internal BPF verifier will check that
* R6 == ctx.
*
* BPF_ABS and BPF_IND are wrappers of function calls, so
* they scratch R1-R5 registers, preserve R6-R9, and store
* return value into R0.
*
* Implicit input:
* ctx
*
* Explicit input:
* X == any register
* K == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
if (likely(ptr != NULL)) {
R0 = get_unaligned_be32(ptr);
CONT;
}
return 0;
BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
off = K;
load_half:
ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
if (likely(ptr != NULL)) {
R0 = get_unaligned_be16(ptr);
CONT;
}
return 0;
BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
off = K;
load_byte:
ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
if (likely(ptr != NULL)) {
R0 = *(u8 *)ptr;
CONT;
}
return 0;
BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
off = K + X;
goto load_word;
BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
off = K + X;
goto load_half;
BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
off = K + X;
goto load_byte;
default_label:
/* If we ever reach this, we have a bug somewhere. */
WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
return 0;
#undef CONT_JMP
#undef CONT
#undef R0
#undef X
#undef A
#undef K
}
u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
const struct sock_filter_int *insni)
__attribute__ ((alias ("__sk_run_filter")));
u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
const struct sock_filter_int *insni)
__attribute__ ((alias ("__sk_run_filter")));
EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
/* Helper to find the offset of pkt_type in sk_buff structure. We want
* to make sure its still a 3bit field starting at a byte boundary;
* taken from arch/x86/net/bpf_jit_comp.c.
*/
#define PKT_TYPE_MAX 7
static unsigned int pkt_type_offset(void)
{
struct sk_buff skb_probe = { .pkt_type = ~0, };
u8 *ct = (u8 *) &skb_probe;
unsigned int off;
for (off = 0; off < sizeof(struct sk_buff); off++) {
if (ct[off] == PKT_TYPE_MAX)
return off;
}
pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
return -1;
}
static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
return __skb_get_poff(skb);
}
static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (skb->len < sizeof(struct nlattr))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (skb->len < sizeof(struct nlattr))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *) &skb->data[A];
if (nla->nla_len > skb->len - A)
return 0;
nla = nla_find_nested(nla, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
return raw_smp_processor_id();
}
/* Register mappings for user programs. */
#define A_REG 0
#define X_REG 7
#define TMP_REG 8
#define ARG2_REG 2
#define ARG3_REG 3
static bool convert_bpf_extensions(struct sock_filter *fp,
struct sock_filter_int **insnp)
{
struct sock_filter_int *insn = *insnp;
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PROTOCOL:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, protocol);
insn++;
/* A = ntohs(A) [emitting a nop or swap16] */
insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
insn->a_reg = A_REG;
insn->imm = 16;
break;
case SKF_AD_OFF + SKF_AD_PKTTYPE:
insn->code = BPF_LDX | BPF_MEM | BPF_B;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = pkt_type_offset();
if (insn->off < 0)
return false;
insn++;
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = PKT_TYPE_MAX;
break;
case SKF_AD_OFF + SKF_AD_IFINDEX:
case SKF_AD_OFF + SKF_AD_HATYPE:
if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
insn->code = BPF_LDX | BPF_MEM | BPF_DW;
else
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = TMP_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, dev);
insn++;
insn->code = BPF_JMP | BPF_JNE | BPF_K;
insn->a_reg = TMP_REG;
insn->imm = 0;
insn->off = 1;
insn++;
insn->code = BPF_JMP | BPF_EXIT;
insn++;
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
insn->a_reg = A_REG;
insn->x_reg = TMP_REG;
if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->off = offsetof(struct net_device, ifindex);
} else {
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->off = offsetof(struct net_device, type);
}
break;
case SKF_AD_OFF + SKF_AD_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, mark);
break;
case SKF_AD_OFF + SKF_AD_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, hash);
break;
case SKF_AD_OFF + SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, queue_mapping);
break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG:
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, vlan_tci);
insn++;
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = ~VLAN_TAG_PRESENT;
} else {
insn->code = BPF_ALU | BPF_RSH | BPF_K;
insn->a_reg = A_REG;
insn->imm = 12;
insn++;
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = 1;
}
break;
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
case SKF_AD_OFF + SKF_AD_NLATTR:
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
case SKF_AD_OFF + SKF_AD_CPU:
/* arg1 = ctx */
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = ARG1_REG;
insn->x_reg = CTX_REG;
insn++;
/* arg2 = A */
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = ARG2_REG;
insn->x_reg = A_REG;
insn++;
/* arg3 = X */
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = ARG3_REG;
insn->x_reg = X_REG;
insn++;
/* Emit call(ctx, arg2=A, arg3=X) */
insn->code = BPF_JMP | BPF_CALL;
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
insn->imm = __skb_get_pay_offset - __bpf_call_base;
break;
case SKF_AD_OFF + SKF_AD_NLATTR:
insn->imm = __skb_get_nlattr - __bpf_call_base;
break;
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
break;
case SKF_AD_OFF + SKF_AD_CPU:
insn->imm = __get_raw_cpu_id - __bpf_call_base;
break;
}
break;
case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
insn->code = BPF_ALU | BPF_XOR | BPF_X;
insn->a_reg = A_REG;
insn->x_reg = X_REG;
break;
default:
/* This is just a dummy call to avoid letting the compiler
* evict __bpf_call_base() as an optimization. Placed here
* where no-one bothers.
*/
BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
return false;
}
*insnp = insn;
return true;
}
/**
* sk_convert_filter - convert filter program
* @prog: the user passed filter program
* @len: the length of the user passed filter program
* @new_prog: buffer where converted program will be stored
* @new_len: pointer to store length of converted program
*
* Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
* Conversion workflow:
*
* 1) First pass for calculating the new program length:
* sk_convert_filter(old_prog, old_len, NULL, &new_len)
*
* 2) 2nd pass to remap in two passes: 1st pass finds new
* jump offsets, 2nd pass remapping:
* new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
* sk_convert_filter(old_prog, old_len, new_prog, &new_len);
*
* User BPF's register A is mapped to our BPF register 6, user BPF
* register X is mapped to BPF register 7; frame pointer is always
* register 10; Context 'void *ctx' is stored in register 1, that is,
* for socket filters: ctx == 'struct sk_buff *', for seccomp:
* ctx == 'struct seccomp_data *'.
*/
int sk_convert_filter(struct sock_filter *prog, int len,
struct sock_filter_int *new_prog, int *new_len)
{
int new_flen = 0, pass = 0, target, i;
struct sock_filter_int *new_insn;
struct sock_filter *fp;
int *addrs = NULL;
u8 bpf_src;
BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
if (len <= 0 || len >= BPF_MAXINSNS)
return -EINVAL;
if (new_prog) {
addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
if (!addrs)
return -ENOMEM;
}
do_pass:
new_insn = new_prog;
fp = prog;
if (new_insn) {
new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
new_insn->a_reg = CTX_REG;
new_insn->x_reg = ARG1_REG;
}
new_insn++;
for (i = 0; i < len; fp++, i++) {
struct sock_filter_int tmp_insns[6] = { };
struct sock_filter_int *insn = tmp_insns;
if (addrs)
addrs[i] = new_insn - new_prog;
switch (fp->code) {
/* All arithmetic insns and skb loads map as-is. */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_NEG:
case BPF_LD | BPF_ABS | BPF_W:
case BPF_LD | BPF_ABS | BPF_H:
case BPF_LD | BPF_ABS | BPF_B:
case BPF_LD | BPF_IND | BPF_W:
case BPF_LD | BPF_IND | BPF_H:
case BPF_LD | BPF_IND | BPF_B:
/* Check for overloaded BPF extension and
* directly convert it if found, otherwise
* just move on with mapping.
*/
if (BPF_CLASS(fp->code) == BPF_LD &&
BPF_MODE(fp->code) == BPF_ABS &&
convert_bpf_extensions(fp, &insn))
break;
insn->code = fp->code;
insn->a_reg = A_REG;
insn->x_reg = X_REG;
insn->imm = fp->k;
break;
/* Jump opcodes map as-is, but offsets need adjustment. */
case BPF_JMP | BPF_JA:
target = i + fp->k + 1;
insn->code = fp->code;
#define EMIT_JMP \
do { \
if (target >= len || target < 0) \
goto err; \
insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
/* Adjust pc relative offset for 2nd or 3rd insn. */ \
insn->off -= insn - tmp_insns; \
} while (0)
EMIT_JMP;
break;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
/* BPF immediates are signed, zero extend
* immediate into tmp register and use it
* in compare insn.
*/
insn->code = BPF_ALU | BPF_MOV | BPF_K;
insn->a_reg = TMP_REG;
insn->imm = fp->k;
insn++;
insn->a_reg = A_REG;
insn->x_reg = TMP_REG;
bpf_src = BPF_X;
} else {
insn->a_reg = A_REG;
insn->x_reg = X_REG;
insn->imm = fp->k;
bpf_src = BPF_SRC(fp->code);
}
/* Common case where 'jump_false' is next insn. */
if (fp->jf == 0) {
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
target = i + fp->jt + 1;
EMIT_JMP;
break;
}
/* Convert JEQ into JNE when 'jump_true' is next insn. */
if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
insn->code = BPF_JMP | BPF_JNE | bpf_src;
target = i + fp->jf + 1;
EMIT_JMP;
break;
}
/* Other jumps are mapped into two insns: Jxx and JA. */
target = i + fp->jt + 1;
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
EMIT_JMP;
insn++;
insn->code = BPF_JMP | BPF_JA;
target = i + fp->jf + 1;
EMIT_JMP;
break;
/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B:
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = TMP_REG;
insn->x_reg = A_REG;
insn++;
insn->code = BPF_LD | BPF_ABS | BPF_B;
insn->a_reg = A_REG;
insn->imm = fp->k;
insn++;
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = 0xf;
insn++;
insn->code = BPF_ALU | BPF_LSH | BPF_K;
insn->a_reg = A_REG;
insn->imm = 2;
insn++;
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = X_REG;
insn->x_reg = A_REG;
insn++;
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = A_REG;
insn->x_reg = TMP_REG;
break;
/* RET_K, RET_A are remaped into 2 insns. */
case BPF_RET | BPF_A:
case BPF_RET | BPF_K:
insn->code = BPF_ALU | BPF_MOV |
(BPF_RVAL(fp->code) == BPF_K ?
BPF_K : BPF_X);
insn->a_reg = 0;
insn->x_reg = A_REG;
insn->imm = fp->k;
insn++;
insn->code = BPF_JMP | BPF_EXIT;
break;
/* Store to stack. */
case BPF_ST:
case BPF_STX:
insn->code = BPF_STX | BPF_MEM | BPF_W;
insn->a_reg = FP_REG;
insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break;
/* Load from stack. */
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
A_REG : X_REG;
insn->x_reg = FP_REG;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break;
/* A = K or X = K */
case BPF_LD | BPF_IMM:
case BPF_LDX | BPF_IMM:
insn->code = BPF_ALU | BPF_MOV | BPF_K;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
A_REG : X_REG;
insn->imm = fp->k;
break;
/* X = A */
case BPF_MISC | BPF_TAX:
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = X_REG;
insn->x_reg = A_REG;
break;
/* A = X */
case BPF_MISC | BPF_TXA:
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = A_REG;
insn->x_reg = X_REG;
break;
/* A = skb->len or X = skb->len */
case BPF_LD | BPF_W | BPF_LEN:
case BPF_LDX | BPF_W | BPF_LEN:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
A_REG : X_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, len);
break;
/* access seccomp_data fields */
case BPF_LDX | BPF_ABS | BPF_W:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = fp->k;
break;
default:
goto err;
}
insn++;
if (new_prog)
memcpy(new_insn, tmp_insns,
sizeof(*insn) * (insn - tmp_insns));
new_insn += insn - tmp_insns;
}
if (!new_prog) {
/* Only calculating new length. */
*new_len = new_insn - new_prog;
return 0;
}
pass++;
if (new_flen != new_insn - new_prog) {
new_flen = new_insn - new_prog;
if (pass > 2)
goto err;
goto do_pass;
}
kfree(addrs);
BUG_ON(*new_len != new_flen);
return 0;
err:
kfree(addrs);
return -EINVAL;
}
/* Security:
*
* A BPF program is able to use 16 cells of memory to store intermediate
* values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
*
* As we dont want to clear mem[] array for each packet going through
* sk_run_filter(), we check that filter loaded by user never try to read
* a cell if not previously written, and we check all branches to be sure
* a malicious user doesn't try to abuse us.
*/
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16);
masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
if (!masks)
return -ENOMEM;
memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) {
memvalid &= masks[pc];
switch (filter[pc].code) {
case BPF_S_ST:
case BPF_S_STX:
memvalid |= (1 << filter[pc].k);
break;
case BPF_S_LD_MEM:
case BPF_S_LDX_MEM:
if (!(memvalid & (1 << filter[pc].k))) {
ret = -EINVAL;
goto error;
}
break;
case BPF_S_JMP_JA:
/* a jump must set masks on target */
masks[pc + 1 + filter[pc].k] &= memvalid;
memvalid = ~0;
break;
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
/* a jump must set masks on targets */
masks[pc + 1 + filter[pc].jt] &= memvalid;
masks[pc + 1 + filter[pc].jf] &= memvalid;
memvalid = ~0;
break;
}
}
error:
kfree(masks);
return ret;
}
/**
* sk_chk_filter - verify socket filter code
* @filter: filter to verify
* @flen: length of filter
*
* Check the user's filter code. If we let some ugly
* filter code slip through kaboom! The filter must contain
* no references or jumps that are out of range, no illegal
* instructions, and must end with a RET instruction.
*
* All jumps are forward as they are not signed.
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
/*
* Valid instructions are initialized to non-0.
* Invalid instructions are initialized to 0.
*/
static const u8 codes[] = {
[BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
[BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
[BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
[BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
[BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
[BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
[BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
[BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
[BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
[BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
[BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
[BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
[BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
[BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
[BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
[BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
[BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
[BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
[BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
[BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
[BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
[BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
[BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
[BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
[BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
[BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
[BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
[BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
[BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
[BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
[BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
[BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
[BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
[BPF_RET|BPF_K] = BPF_S_RET_K,
[BPF_RET|BPF_A] = BPF_S_RET_A,
[BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
[BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
[BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
[BPF_ST] = BPF_S_ST,
[BPF_STX] = BPF_S_STX,
[BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
[BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
[BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
[BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
[BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
[BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
[BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
};
int pc;
bool anc_found;
if (flen == 0 || flen > BPF_MAXINSNS)
return -EINVAL;
/* check the filter code now */
for (pc = 0; pc < flen; pc++) {
struct sock_filter *ftest = &filter[pc];
u16 code = ftest->code;
if (code >= ARRAY_SIZE(codes))
return -EINVAL;
code = codes[code];
if (!code)
return -EINVAL;
/* Some instructions need special checks */
switch (code) {
case BPF_S_ALU_DIV_K:
case BPF_S_ALU_MOD_K:
/* check for division by zero */
if (ftest->k == 0)
return -EINVAL;
break;
case BPF_S_LD_MEM:
case BPF_S_LDX_MEM:
case BPF_S_ST:
case BPF_S_STX:
/* check for invalid memory addresses */
if (ftest->k >= BPF_MEMWORDS)
return -EINVAL;
break;
case BPF_S_JMP_JA:
/*
* Note, the large ftest->k might cause loops.
* Compare this with conditional jumps below,
* where offsets are limited. --ANK (981016)
*/
if (ftest->k >= (unsigned int)(flen-pc-1))
return -EINVAL;
break;
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
/* for conditionals both must be safe */
if (pc + ftest->jt + 1 >= flen ||
pc + ftest->jf + 1 >= flen)
return -EINVAL;
break;
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
code = BPF_S_ANC_##CODE; \
anc_found = true; \
break
switch (ftest->k) {
ANCILLARY(PROTOCOL);
ANCILLARY(PKTTYPE);
ANCILLARY(IFINDEX);
ANCILLARY(NLATTR);
ANCILLARY(NLATTR_NEST);
ANCILLARY(MARK);
ANCILLARY(QUEUE);
ANCILLARY(HATYPE);
ANCILLARY(RXHASH);
ANCILLARY(CPU);
ANCILLARY(ALU_XOR_X);
ANCILLARY(VLAN_TAG);
ANCILLARY(VLAN_TAG_PRESENT);
ANCILLARY(PAY_OFFSET);
}
/* ancillary operation unknown or unsupported */
if (anc_found == false && ftest->k >= SKF_AD_OFF)
return -EINVAL;
}
ftest->code = code;
}
/* last instruction must be a RET code */
switch (filter[flen - 1].code) {
case BPF_S_RET_K:
case BPF_S_RET_A:
return check_load_and_stores(filter, flen);
}
return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
static int sk_store_orig_filter(struct sk_filter *fp,
const struct sock_fprog *fprog)
{
unsigned int fsize = sk_filter_proglen(fprog);
struct sock_fprog_kern *fkprog;
fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
if (!fp->orig_prog)
return -ENOMEM;
fkprog = fp->orig_prog;
fkprog->len = fprog->len;
fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
if (!fkprog->filter) {
kfree(fp->orig_prog);
return -ENOMEM;
}
return 0;
}
static void sk_release_orig_filter(struct sk_filter *fp)
{
struct sock_fprog_kern *fprog = fp->orig_prog;
if (fprog) {
kfree(fprog->filter);
kfree(fprog);
}
}
/**
* sk_filter_release_rcu - Release a socket filter by rcu_head
* @rcu: rcu_head that contains the sk_filter to free
*/
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
sk_release_orig_filter(fp);
bpf_jit_free(fp);
}
/**
* sk_filter_release - release a socket filter
* @fp: filter to remove
*
* Remove a filter from a socket and release its resources.
*/
static void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
call_rcu(&fp->rcu, sk_filter_release_rcu);
}
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
sk_filter_release(fp);
}
void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_inc(&fp->refcnt);
atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
}
static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
struct sock *sk,
unsigned int len)
{
struct sk_filter *fp_new;
if (sk == NULL)
return krealloc(fp, len, GFP_KERNEL);
fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
if (fp_new) {
memcpy(fp_new, fp, sizeof(struct sk_filter));
/* As we're kepping orig_prog in fp_new along,
* we need to make sure we're not evicting it
* from the old fp.
*/
fp->orig_prog = NULL;
sk_filter_uncharge(sk, fp);
}
return fp_new;
}
static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
struct sock *sk)
{
struct sock_filter *old_prog;
struct sk_filter *old_fp;
int i, err, new_len, old_len = fp->len;
/* We are free to overwrite insns et al right here as it
* won't be used at this point in time anymore internally
* after the migration to the internal BPF instruction
* representation.
*/
BUILD_BUG_ON(sizeof(struct sock_filter) !=
sizeof(struct sock_filter_int));
/* For now, we need to unfiddle BPF_S_* identifiers in place.
* This can sooner or later on be subject to removal, e.g. when
* JITs have been converted.
*/
for (i = 0; i < fp->len; i++)
sk_decode_filter(&fp->insns[i], &fp->insns[i]);
/* Conversion cannot happen on overlapping memory areas,
* so we need to keep the user BPF around until the 2nd
* pass. At this time, the user BPF is stored in fp->insns.
*/
old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
GFP_KERNEL);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
}
/* 1st pass: calculate the new program length. */
err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
if (err)
goto out_err_free;
/* Expand fp for appending the new filter representation. */
old_fp = fp;
fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
if (!fp) {
/* The old_fp is still around in case we couldn't
* allocate new memory, so uncharge on that one.
*/
fp = old_fp;
err = -ENOMEM;
goto out_err_free;
}
fp->bpf_func = sk_run_filter_int_skb;
fp->len = new_len;
/* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
if (err)
/* 2nd sk_convert_filter() can fail only if it fails
* to allocate memory, remapping must succeed. Note,
* that at this time old_fp has already been released
* by __sk_migrate_realloc().
*/
goto out_err_free;
kfree(old_prog);
return fp;
out_err_free:
kfree(old_prog);
out_err:
/* Rollback filter setup. */
if (sk != NULL)
sk_filter_uncharge(sk, fp);
else
kfree(fp);
return ERR_PTR(err);
}
static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
struct sock *sk)
{
int err;
fp->bpf_func = NULL;
fp->jited = 0;
err = sk_chk_filter(fp->insns, fp->len);
if (err)
return ERR_PTR(err);
/* Probe if we can JIT compile the filter and if so, do
* the compilation of the filter.
*/
bpf_jit_compile(fp);
/* JIT compiler couldn't process this filter, so do the
* internal BPF translation for the optimized interpreter.
*/
if (!fp->jited)
fp = __sk_migrate_filter(fp, sk);
return fp;
}
/**
* sk_unattached_filter_create - create an unattached filter
* @fprog: the filter program
* @pfp: the unattached filter that is created
*
* Create a filter independent of any socket. We first run some
* sanity checks on it to make sure it does not explode on us later.
* If an error occurs or there is insufficient memory for the filter
* a negative errno code is returned. On success the return is zero.
*/
int sk_unattached_filter_create(struct sk_filter **pfp,
struct sock_fprog *fprog)
{
unsigned int fsize = sk_filter_proglen(fprog);
struct sk_filter *fp;
/* Make sure new filter is there and in the right amounts. */
if (fprog->filter == NULL)
return -EINVAL;
fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
if (!fp)
return -ENOMEM;
memcpy(fp->insns, fprog->filter, fsize);
atomic_set(&fp->refcnt, 1);
fp->len = fprog->len;
/* Since unattached filters are not copied back to user
* space through sk_get_filter(), we do not need to hold
* a copy here, and can spare us the work.
*/
fp->orig_prog = NULL;
/* __sk_prepare_filter() already takes care of uncharging
* memory in case something goes wrong.
*/
fp = __sk_prepare_filter(fp, NULL);
if (IS_ERR(fp))
return PTR_ERR(fp);
*pfp = fp;
return 0;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
/**
* sk_attach_filter - attach a socket filter
* @fprog: the filter program
* @sk: the socket to use
*
* Attach the user's filter code. We first run some sanity checks on
* it to make sure it does not explode on us later. If an error
* occurs or there is insufficient memory for the filter a negative
* errno code is returned. On success the return is zero.
*/
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
unsigned int fsize = sk_filter_proglen(fprog);
unsigned int sk_fsize = sk_filter_size(fprog->len);
int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
/* Make sure new filter is there and in the right amounts. */
if (fprog->filter == NULL)
return -EINVAL;
fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
if (!fp)
return -ENOMEM;
if (copy_from_user(fp->insns, fprog->filter, fsize)) {
sock_kfree_s(sk, fp, sk_fsize);
return -EFAULT;
}
atomic_set(&fp->refcnt, 1);
fp->len = fprog->len;
err = sk_store_orig_filter(fp, fprog);
if (err) {
sk_filter_uncharge(sk, fp);
return -ENOMEM;
}
/* __sk_prepare_filter() already takes care of uncharging
* memory in case something goes wrong.
*/
fp = __sk_prepare_filter(fp, sk);
if (IS_ERR(fp))
return PTR_ERR(fp);
old_fp = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
rcu_assign_pointer(sk->sk_filter, fp);
if (old_fp)
sk_filter_uncharge(sk, old_fp);
return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
struct sk_filter *filter;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
filter = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
if (filter) {
RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
ret = 0;
}
return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
static const u16 decodes[] = {
[BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
[BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
[BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
[BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
[BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
[BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
[BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
[BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
[BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
[BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
[BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
[BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
[BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
[BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
[BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
[BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
[BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
[BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
[BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
[BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
[BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
[BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
[BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
[BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
[BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
[BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
[BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
[BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
[BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
[BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
[BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
[BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
[BPF_S_RET_K] = BPF_RET|BPF_K,
[BPF_S_RET_A] = BPF_RET|BPF_A,
[BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
[BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
[BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
[BPF_S_ST] = BPF_ST,
[BPF_S_STX] = BPF_STX,
[BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
[BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
[BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
[BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
[BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
[BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
[BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
[BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
[BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
};
u16 code;
code = filt->code;
to->code = decodes[code];
to->jt = filt->jt;
to->jf = filt->jf;
to->k = filt->k;
}
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
unsigned int len)
{
struct sock_fprog_kern *fprog;
struct sk_filter *filter;
int ret = 0;
lock_sock(sk);
filter = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
if (!filter)
goto out;
/* We're copying the filter that has been originally attached,
* so no conversion/decode needed anymore.
*/
fprog = filter->orig_prog;
ret = fprog->len;
if (!len)
/* User space only enquires number of filter blocks. */
goto out;
ret = -EINVAL;
if (len < fprog->len)
goto out;
ret = -EFAULT;
if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
goto out;
/* Instead of bytes, the API requests to return the number
* of filter blocks.
*/
ret = fprog->len;
out:
release_sock(sk);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2124_0 |
crossvul-cpp_data_good_3653_0 | /*
* Copyright © 2008,2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped by GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct change_domains *cd)
{
uint32_t invalidate_domains = 0, flush_domains = 0;
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (obj->base.pending_write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
if (obj->base.write_domain &&
(((obj->base.write_domain != obj->base.pending_read_domains ||
obj->ring != ring)) ||
(obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
flush_domains |= obj->base.write_domain;
invalidate_domains |=
obj->base.pending_read_domains & ~obj->base.write_domain;
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
* write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that.
*/
if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->base.pending_write_domain = obj->base.write_domain;
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
int and;
struct hlist_head buckets[0];
};
static struct eb_objects *
eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
if (eb == NULL)
return eb;
eb->and = count - 1;
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
return NULL;
}
static void
eb_destroy(struct eb_objects *eb)
{
kfree(eb);
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
uint32_t target_offset;
int ret = -EINVAL;
/* we've already hold a reference to all valid objects */
target_obj = &eb_get_object(eb, reloc->target_handle)->base;
if (unlikely(target_obj == NULL))
return -ENOENT;
target_offset = to_intel_bo(target_obj)->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->write_domain,
target_obj->pending_write_domain);
return ret;
}
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
if (target_offset == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
return ret;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
return ret;
}
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && in_atomic())
return -EFAULT;
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
return ret;
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
iowrite32(reloc->delta, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
}
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
return 0;
}
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
for (i = 0; i < entry->relocation_count; i++) {
struct drm_i915_gem_relocation_entry reloc;
if (__copy_from_user_inatomic(&reloc,
user_relocs+i,
sizeof(reloc)))
return -EFAULT;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
if (ret)
return ret;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
&reloc.presumed_offset,
sizeof(reloc.presumed_offset)))
return -EFAULT;
}
return 0;
}
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
if (ret)
return ret;
}
return 0;
}
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the struct mutex lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
}
pagefault_enable();
return ret;
}
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
return ret;
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
if (obj->tiling_mode) {
ret = i915_gem_object_get_fence(obj, ring);
if (ret)
goto err_unpin;
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
i915_gem_object_pin_fence(obj);
} else {
ret = i915_gem_object_put_fence(obj);
if (ret)
goto err_unpin;
}
obj->pending_fenced_gpu_access = true;
}
}
entry->offset = obj->gtt_offset;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
return ret;
}
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
else
list_move_tail(&obj->exec_list, &ordered_objects);
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
}
list_splice(&ordered_objects, objects);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
*
* 1a. Unbind all objects that do not match the GTT constraints for
* the execbuffer (fenceable, mappable, alignment etc).
* 1b. Increment pin count for already bound objects.
* 2. Bind new objects.
* 3. Decrement pin count.
*
* This avoid unnecessary unbinding of later objects in order to makr
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
if (!obj->gtt_space)
continue;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space)
continue;
ret = pin_and_fence_object(obj, ring);
if (ret) {
int ret_ignore;
/* This can potentially raise a harmless
* -EINVAL if we failed to bind in the above
* call. It cannot raise -EINTR since we know
* that the bo is freshly bound and so will
* not need to be flushed or waited upon.
*/
ret_ignore = i915_gem_object_unbind(obj);
(void)ret_ignore;
WARN_ON(obj->gtt_space);
break;
}
}
/* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
/* ... and ensure ppgtt mapping exist if needed. */
if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, obj->cache_level);
obj->has_aliasing_ppgtt_mapping = 1;
}
}
if (ret != -ENOSPC || retry > 1)
return ret;
/* First attempt, just clear anything that is purgeable.
* Second attempt, clear the entire GTT.
*/
ret = i915_gem_evict_everything(ring->dev, retry == 0);
if (ret)
return ret;
retry++;
} while (1);
err:
list_for_each_entry_continue_reverse(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
}
return ret;
}
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
int *reloc_offset;
int i, total, ret;
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
total = 0;
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
reloc = drm_malloc_ab(total, sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
drm_free_large(reloc);
drm_free_large(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
}
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret) {
mutex_lock(&dev->struct_mutex);
goto err;
}
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
}
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects);
if (ret)
goto err;
list_for_each_entry(obj, objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}
/* Leave the user relocations as are, this is the painfully slow path,
* and we want to avoid the complication of dropping the lock whilst
* having buffers reserved in the aperture and so causing spurious
* ENOSPC for random operations.
*/
err:
drm_free_large(reloc);
drm_free_large(reloc_offset);
return ret;
}
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
return ret;
}
}
return 0;
}
static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Disable semaphores on SNB */
if (INTEL_INFO(dev)->gen == 6)
return 0;
return 1;
}
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
{
struct intel_ring_buffer *from = obj->ring;
u32 seqno;
int ret, idx;
if (from == NULL || to == from)
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
seqno = obj->last_rendering_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
if (seqno == from->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
}
seqno = request->seqno;
}
from->sync_seqno[idx] = seqno;
return to->sync_to(to, from, seqno - 1);
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
return 0;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct change_domains cd;
int ret;
memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
cd.flush_rings);
if (ret)
return ret;
}
if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
if (ret)
return ret;
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
return ret;
}
return 0;
}
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
return -EINVAL;
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
if (fault_in_pages_readable(ptr, length))
return -EFAULT;
}
return 0;
}
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
u32 seqno)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
u32 invalidate;
/*
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires.
*
* The sampler always gets flushed on i965 (sigh).
*/
invalidate = I915_GEM_DOMAIN_COMMAND;
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
i915_gem_next_request_seqno(ring);
kfree(request);
}
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
ret = intel_ring_begin(ring, 4 * 3);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
return 0;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4)
return -EINVAL;
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
(struct drm_clip_rect __user *)(uintptr_t)
args->cliprects_ptr,
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
}
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
BUG_ON(ring->sync_seqno[i]);
}
}
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
if (ret)
goto err;
}
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
kfree(cliprects);
return ret;
}
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}
exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec2_list);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3653_0 |
crossvul-cpp_data_good_4911_0 | /*
+----------------------------------------------------------------------+
| PHP Version 7 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Rasmus Lerdorf <rasmus@php.net> |
| Ilia Alshanetsky <iliaa@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#include <stdio.h>
#include "php.h"
#include <ctype.h>
#include "php_string.h"
#include "ext/standard/head.h"
#include "ext/standard/file.h"
#include "basic_functions.h"
#include "exec.h"
#include "php_globals.h"
#include "SAPI.h"
#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#if HAVE_SIGNAL_H
#include <signal.h>
#endif
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#endif
#if HAVE_NICE && HAVE_UNISTD_H
#include <unistd.h>
#endif
/* {{{ php_exec
* If type==0, only last line of output is returned (exec)
* If type==1, all lines will be printed and last lined returned (system)
* If type==2, all lines will be saved to given array (exec with &$array)
* If type==3, output will be printed binary, no lines will be saved or returned (passthru)
*
*/
PHPAPI int php_exec(int type, char *cmd, zval *array, zval *return_value)
{
FILE *fp;
char *buf;
size_t l = 0;
int pclose_return;
char *b, *d=NULL;
php_stream *stream;
size_t buflen, bufl = 0;
#if PHP_SIGCHILD
void (*sig_handler)() = NULL;
#endif
#if PHP_SIGCHILD
sig_handler = signal (SIGCHLD, SIG_DFL);
#endif
#ifdef PHP_WIN32
fp = VCWD_POPEN(cmd, "rb");
#else
fp = VCWD_POPEN(cmd, "r");
#endif
if (!fp) {
php_error_docref(NULL, E_WARNING, "Unable to fork [%s]", cmd);
goto err;
}
stream = php_stream_fopen_from_pipe(fp, "rb");
buf = (char *) emalloc(EXEC_INPUT_BUF);
buflen = EXEC_INPUT_BUF;
if (type != 3) {
b = buf;
while (php_stream_get_line(stream, b, EXEC_INPUT_BUF, &bufl)) {
/* no new line found, let's read some more */
if (b[bufl - 1] != '\n' && !php_stream_eof(stream)) {
if (buflen < (bufl + (b - buf) + EXEC_INPUT_BUF)) {
bufl += b - buf;
buflen = bufl + EXEC_INPUT_BUF;
buf = erealloc(buf, buflen);
b = buf + bufl;
} else {
b += bufl;
}
continue;
} else if (b != buf) {
bufl += b - buf;
}
if (type == 1) {
PHPWRITE(buf, bufl);
if (php_output_get_level() < 1) {
sapi_flush();
}
} else if (type == 2) {
/* strip trailing whitespaces */
l = bufl;
while (l-- > 0 && isspace(((unsigned char *)buf)[l]));
if (l != (bufl - 1)) {
bufl = l + 1;
buf[bufl] = '\0';
}
add_next_index_stringl(array, buf, bufl);
}
b = buf;
}
if (bufl) {
/* strip trailing whitespaces if we have not done so already */
if ((type == 2 && buf != b) || type != 2) {
l = bufl;
while (l-- > 0 && isspace(((unsigned char *)buf)[l]));
if (l != (bufl - 1)) {
bufl = l + 1;
buf[bufl] = '\0';
}
if (type == 2) {
add_next_index_stringl(array, buf, bufl);
}
}
/* Return last line from the shell command */
RETVAL_STRINGL(buf, bufl);
} else { /* should return NULL, but for BC we return "" */
RETVAL_EMPTY_STRING();
}
} else {
while((bufl = php_stream_read(stream, buf, EXEC_INPUT_BUF)) > 0) {
PHPWRITE(buf, bufl);
}
}
pclose_return = php_stream_close(stream);
efree(buf);
done:
#if PHP_SIGCHILD
if (sig_handler) {
signal(SIGCHLD, sig_handler);
}
#endif
if (d) {
efree(d);
}
return pclose_return;
err:
pclose_return = -1;
goto done;
}
/* }}} */
static void php_exec_ex(INTERNAL_FUNCTION_PARAMETERS, int mode) /* {{{ */
{
char *cmd;
size_t cmd_len;
zval *ret_code=NULL, *ret_array=NULL;
int ret;
if (mode) {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z/", &cmd, &cmd_len, &ret_code) == FAILURE) {
RETURN_FALSE;
}
} else {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z/z/", &cmd, &cmd_len, &ret_array, &ret_code) == FAILURE) {
RETURN_FALSE;
}
}
if (!cmd_len) {
php_error_docref(NULL, E_WARNING, "Cannot execute a blank command");
RETURN_FALSE;
}
if (strlen(cmd) != cmd_len) {
php_error_docref(NULL, E_WARNING, "NULL byte detected. Possible attack");
RETURN_FALSE;
}
if (!ret_array) {
ret = php_exec(mode, cmd, NULL, return_value);
} else {
if (Z_TYPE_P(ret_array) != IS_ARRAY) {
zval_dtor(ret_array);
array_init(ret_array);
}
ret = php_exec(2, cmd, ret_array, return_value);
}
if (ret_code) {
zval_dtor(ret_code);
ZVAL_LONG(ret_code, ret);
}
}
/* }}} */
/* {{{ proto string exec(string command [, array &output [, int &return_value]])
Execute an external program */
PHP_FUNCTION(exec)
{
php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */
/* {{{ proto int system(string command [, int &return_value])
Execute an external program and display output */
PHP_FUNCTION(system)
{
php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ proto void passthru(string command [, int &return_value])
Execute an external program and display raw output */
PHP_FUNCTION(passthru)
{
php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
/* }}} */
/* {{{ php_escape_shell_cmd
Escape all chars that could possibly be used to
break out of a shell command
This function emalloc's a string and returns the pointer.
Remember to efree it when done with it.
*NOT* safe for binary strings
*/
PHPAPI zend_string *php_escape_shell_cmd(char *str)
{
register int x, y, l = (int)strlen(str);
size_t estimate = (2 * l) + 1;
zend_string *cmd;
#ifndef PHP_WIN32
char *p = NULL;
#endif
cmd = zend_string_safe_alloc(2, l, 0, 0);
for (x = 0, y = 0; x < l; x++) {
int mb_len = php_mblen(str + x, (l - x));
/* skip non-valid multibyte characters */
if (mb_len < 0) {
continue;
} else if (mb_len > 1) {
memcpy(ZSTR_VAL(cmd) + y, str + x, mb_len);
y += mb_len;
x += mb_len - 1;
continue;
}
switch (str[x]) {
#ifndef PHP_WIN32
case '"':
case '\'':
if (!p && (p = memchr(str + x + 1, str[x], l - x - 1))) {
/* noop */
} else if (p && *p == str[x]) {
p = NULL;
} else {
ZSTR_VAL(cmd)[y++] = '\\';
}
ZSTR_VAL(cmd)[y++] = str[x];
break;
#else
/* % is Windows specific for environmental variables, ^%PATH% will
output PATH while ^%PATH^% will not. escapeshellcmd->val will escape all % and !.
*/
case '%':
case '!':
case '"':
case '\'':
#endif
case '#': /* This is character-set independent */
case '&':
case ';':
case '`':
case '|':
case '*':
case '?':
case '~':
case '<':
case '>':
case '^':
case '(':
case ')':
case '[':
case ']':
case '{':
case '}':
case '$':
case '\\':
case '\x0A': /* excluding these two */
case '\xFF':
#ifdef PHP_WIN32
ZSTR_VAL(cmd)[y++] = '^';
#else
ZSTR_VAL(cmd)[y++] = '\\';
#endif
/* fall-through */
default:
ZSTR_VAL(cmd)[y++] = str[x];
}
}
ZSTR_VAL(cmd)[y] = '\0';
if ((estimate - y) > 4096) {
/* realloc if the estimate was way overill
* Arbitrary cutoff point of 4096 */
cmd = zend_string_truncate(cmd, y, 0);
}
ZSTR_LEN(cmd) = y;
return cmd;
}
/* }}} */
/* {{{ php_escape_shell_arg
*/
PHPAPI zend_string *php_escape_shell_arg(char *str)
{
int x, y = 0, l = (int)strlen(str);
zend_string *cmd;
size_t estimate = (4 * l) + 3;
cmd = zend_string_safe_alloc(4, l, 2, 0); /* worst case */
#ifdef PHP_WIN32
ZSTR_VAL(cmd)[y++] = '"';
#else
ZSTR_VAL(cmd)[y++] = '\'';
#endif
for (x = 0; x < l; x++) {
int mb_len = php_mblen(str + x, (l - x));
/* skip non-valid multibyte characters */
if (mb_len < 0) {
continue;
} else if (mb_len > 1) {
memcpy(ZSTR_VAL(cmd) + y, str + x, mb_len);
y += mb_len;
x += mb_len - 1;
continue;
}
switch (str[x]) {
#ifdef PHP_WIN32
case '"':
case '%':
case '!':
ZSTR_VAL(cmd)[y++] = ' ';
break;
#else
case '\'':
ZSTR_VAL(cmd)[y++] = '\'';
ZSTR_VAL(cmd)[y++] = '\\';
ZSTR_VAL(cmd)[y++] = '\'';
#endif
/* fall-through */
default:
ZSTR_VAL(cmd)[y++] = str[x];
}
}
#ifdef PHP_WIN32
if (y > 0 && '\\' == ZSTR_VAL(cmd)[y - 1]) {
int k = 0, n = y - 1;
for (; n >= 0 && '\\' == ZSTR_VAL(cmd)[n]; n--, k++);
if (k % 2) {
ZSTR_VAL(cmd)[y++] = '\\';
}
}
ZSTR_VAL(cmd)[y++] = '"';
#else
ZSTR_VAL(cmd)[y++] = '\'';
#endif
ZSTR_VAL(cmd)[y] = '\0';
if ((estimate - y) > 4096) {
/* realloc if the estimate was way overill
* Arbitrary cutoff point of 4096 */
cmd = zend_string_truncate(cmd, y, 0);
}
ZSTR_LEN(cmd) = y;
return cmd;
}
/* }}} */
/* {{{ proto string escapeshellcmd(string command)
Escape shell metacharacters */
PHP_FUNCTION(escapeshellcmd)
{
char *command;
size_t command_len;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &command, &command_len) == FAILURE) {
return;
}
if (command_len) {
RETVAL_STR(php_escape_shell_cmd(command));
} else {
RETVAL_EMPTY_STRING();
}
}
/* }}} */
/* {{{ proto string escapeshellarg(string arg)
Quote and escape an argument for use in a shell command */
PHP_FUNCTION(escapeshellarg)
{
char *argument;
size_t argument_len;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &argument, &argument_len) == FAILURE) {
return;
}
if (argument) {
RETVAL_STR(php_escape_shell_arg(argument));
}
}
/* }}} */
/* {{{ proto string shell_exec(string cmd)
Execute command via shell and return complete output as string */
PHP_FUNCTION(shell_exec)
{
FILE *in;
char *command;
size_t command_len;
zend_string *ret;
php_stream *stream;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &command, &command_len) == FAILURE) {
return;
}
#ifdef PHP_WIN32
if ((in=VCWD_POPEN(command, "rt"))==NULL) {
#else
if ((in=VCWD_POPEN(command, "r"))==NULL) {
#endif
php_error_docref(NULL, E_WARNING, "Unable to execute '%s'", command);
RETURN_FALSE;
}
stream = php_stream_fopen_from_pipe(in, "rb");
ret = php_stream_copy_to_mem(stream, PHP_STREAM_COPY_ALL, 0);
php_stream_close(stream);
if (ret && ZSTR_LEN(ret) > 0) {
RETVAL_STR(ret);
}
}
/* }}} */
#ifdef HAVE_NICE
/* {{{ proto bool proc_nice(int priority)
Change the priority of the current process */
PHP_FUNCTION(proc_nice)
{
zend_long pri;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &pri) == FAILURE) {
RETURN_FALSE;
}
errno = 0;
php_ignore_value(nice(pri));
if (errno) {
php_error_docref(NULL, E_WARNING, "Only a super user may attempt to increase the priority of a process");
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 fdm=marker
* vim<600: sw=4 ts=4
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_4911_0 |
crossvul-cpp_data_good_3664_0 | /*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
#include <stdio.h>
#include <string.h>
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
struct hblk ** result = (struct hblk **)
GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
GC_bool ignore_off_page,
GC_bool retry); /* from alloc.c */
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
word n_blocks;
ptr_t result;
GC_bool retry = FALSE;
/* Round up to a multiple of a granule. */
lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
if (GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
if (0 == h) {
GC_merge_unmapped();
h = GC_allochblk(lb, k, flags);
}
# endif
while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
h = GC_allochblk(lb, k, flags);
retry = TRUE;
}
if (h == 0) {
result = 0;
} else {
size_t total_bytes = n_blocks * HBLKSIZE;
if (n_blocks > 1) {
GC_large_allocd_bytes += total_bytes;
if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
GC_max_large_allocd_bytes = GC_large_allocd_bytes;
}
result = h -> hb_body;
}
return result;
}
/* Allocate a large block of size lb bytes. Clear if appropriate. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
ptr_t result = GC_alloc_large(lb, k, flags);
word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (0 == result) return 0;
if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
/* Clear the whole block, in case of GC_realloc call. */
BZERO(result, n_blocks * HBLKSIZE);
}
return result;
}
/* allocate lb bytes for an object of kind k. */
/* Should not be used to directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
if(SMALL_OBJ(lb)) {
struct obj_kind * kind = GC_obj_kinds + k;
size_t lg = GC_size_map[lb];
void ** opp = &(kind -> ok_freelist[lg]);
op = *opp;
if (EXPECT(0 == op, FALSE)) {
if (GC_size_map[lb] == 0) {
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
return(GC_generic_malloc_inner(lb, k));
}
if (kind -> ok_reclaim_list == 0) {
if (!GC_alloc_reclaim_list(kind)) goto out;
}
op = GC_allocobj(lg, k);
if (op == 0) goto out;
}
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
} else {
op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
GC_bytes_allocd += lb;
}
out:
return op;
}
/* Allocate a composite object of size n bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
word lb_adjusted;
void * op;
if (lb <= HBLKSIZE)
return(GC_generic_malloc_inner(lb, k));
lb_adjusted = ADD_SLOP(lb);
op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
GC_bytes_allocd += lb_adjusted;
return op;
}
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
DCL_LOCK_STATE;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
} else {
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
return((*GC_get_oom_fn())(lb));
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
LOCK();
result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
UNLOCK();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
return((*GC_get_oom_fn())(lb));
} else {
return(result);
}
}
/* Allocate lb bytes of atomic (pointerfree) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
{
void *op;
void ** opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = &(GC_aobjfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
*opp = obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return((void *) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
}
/* Allocate lb bytes of composite (pointerful) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc(size_t lb)
#else
GC_API void * GC_CALL GC_malloc(size_t lb)
#endif
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = (void **)&(GC_objfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return (GENERAL_MALLOC((word)lb, NORMAL));
}
GC_ASSERT(0 == obj_link(op)
|| ((word)obj_link(op)
<= (word)GC_greatest_plausible_heap_addr
&& (word)obj_link(op)
>= (word)GC_least_plausible_heap_addr));
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return op;
} else {
return(GENERAL_MALLOC(lb, NORMAL));
}
}
/* Allocate lb bytes of pointerful, traced, but not collectable data */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
lg = GC_size_map[lb];
opp = &(GC_uobjfreelist[lg]);
LOCK();
op = *opp;
if (EXPECT(0 != op, TRUE)) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit ws already set on free list. It will be */
/* cleared only temporarily during a collection, as a */
/* result of the normal free list mark bit clearing. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
/* For small objects, the free lists are completely marked. */
}
GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
hdr * hhdr;
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
if (0 == op) return(0);
GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
hhdr = HDR(op);
/* We don't need the lock here, since we have an undisguised */
/* pointer. We do need to hold the lock while we adjust */
/* mark bits. */
LOCK();
set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
GC_ASSERT(hhdr -> hb_n_marks == 0);
hhdr -> hb_n_marks = 1;
UNLOCK();
return((void *) op);
}
}
#ifdef REDIRECT_MALLOC
# ifndef MSWINCE
# include <errno.h>
# endif
/* Avoid unnecessary nested procedure calls here, by #defining some */
/* malloc replacements. Otherwise we end up saving a */
/* meaningless return address in the object. It also speeds things up, */
/* but it is admittedly quite ugly. */
# define GC_debug_malloc_replacement(lb) \
GC_debug_malloc(lb, GC_DBG_RA "unknown", 0)
void * malloc(size_t lb)
{
/* It might help to manually inline the GC_malloc call here. */
/* But any decent compiler should reduce the extra procedure call */
/* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
/*
* Thread initialisation can call malloc before
* we're ready for it.
* It's not clear that this is enough to help matters.
* The thread implementation may well call malloc at other
* inopportune times.
*/
if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
return((void *)REDIRECT_MALLOC(lb));
}
#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
STATIC ptr_t GC_libpthread_start = 0;
STATIC ptr_t GC_libpthread_end = 0;
STATIC ptr_t GC_libld_start = 0;
STATIC ptr_t GC_libld_end = 0;
STATIC void GC_init_lib_bounds(void)
{
if (GC_libpthread_start != 0) return;
GC_init(); /* if not called yet */
if (!GC_text_mapping("libpthread-",
&GC_libpthread_start, &GC_libpthread_end)) {
WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
/* This might still work with some versions of libpthread, */
/* so we don't abort. Perhaps we should. */
/* Generate message only once: */
GC_libpthread_start = (ptr_t)1;
}
if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
}
}
#endif /* GC_LINUX_THREADS */
#include <limits.h>
#ifdef SIZE_MAX
# define GC_SIZE_MAX SIZE_MAX
#else
# define GC_SIZE_MAX (~(size_t)0)
#endif
#define GC_SQRT_SIZE_MAX ((1U << (WORDSZ / 2)) - 1)
void * calloc(size_t n, size_t lb)
{
if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
&& lb && n > GC_SIZE_MAX / lb)
return NULL;
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
/* libpthread allocated some memory that is only pointed to by */
/* mmapped thread stacks. Make sure it's not collectable. */
{
static GC_bool lib_bounds_set = FALSE;
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (!EXPECT(lib_bounds_set, TRUE)) {
GC_init_lib_bounds();
lib_bounds_set = TRUE;
}
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end))
return GC_malloc_uncollectable(n*lb);
/* The two ranges are actually usually adjacent, so there may */
/* be a way to speed this up. */
}
# endif
return((void *)REDIRECT_MALLOC(n*lb));
}
#ifndef strdup
char *strdup(const char *s)
{
size_t lb = strlen(s) + 1;
char *result = (char *)REDIRECT_MALLOC(lb);
if (result == 0) {
errno = ENOMEM;
return 0;
}
BCOPY(s, result, lb);
return result;
}
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it. */
/* This seems to be true on most Linux systems. */
#ifndef strndup
/* This is similar to strdup(). */
char *strndup(const char *str, size_t size)
{
char *copy;
size_t len = strlen(str);
if (len > size)
len = size;
copy = (char *)REDIRECT_MALLOC(len + 1);
if (copy == NULL) {
errno = ENOMEM;
return NULL;
}
BCOPY(str, copy, len);
copy[len] = '\0';
return copy;
}
#endif /* !strndup */
#undef GC_debug_malloc_replacement
#endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* In bytes */
size_t ngranules; /* sz in granules */
void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
# ifdef LOG_ALLOCS
GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no);
# endif
h = HBLKPTR(p);
hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
|| defined(MSWIN32))
/* For Solaris, we have to redirect malloc calls during */
/* initialization. For the others, this seems to happen */
/* implicitly. */
/* Don't try to deallocate that memory. */
if (0 == hhdr) return;
# endif
GC_ASSERT(GC_base(p) == p);
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
/* Its unnecessary to clear the mark bit. If the */
/* object is reallocated, it doesn't matter. O.w. the */
/* collector will do it, since it's on a free list. */
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
UNLOCK();
}
}
/* Explicitly deallocate an object p when we already hold lock. */
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
GC_INNER void GC_free_inner(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* bytes */
size_t ngranules; /* sz in granules */
void ** flh;
int knd;
struct obj_kind * ok;
h = HBLKPTR(p);
hhdr = HDR(h);
knd = hhdr -> hb_obj_kind;
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
}
}
#endif /* THREADS */
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif
#ifdef REDIRECT_FREE
void free(void * p)
{
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
{
/* Don't bother with initialization checks. If nothing */
/* has been initialized, the check fails, and that's safe, */
/* since we haven't allocated uncollectable objects either. */
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end)) {
GC_free(p);
return;
}
}
# endif
# ifndef IGNORE_FREE
REDIRECT_FREE(p);
# endif
}
#endif /* REDIRECT_FREE */
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3664_0 |
crossvul-cpp_data_bad_5814_0 | /*
* DSP utils
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DSP utils
*/
#include "libavutil/attributes.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "copy_block.h"
#include "dct.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "imgconvert.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "config.h"
#include "diracdsp.h"
uint32_t ff_squareTbl[512] = {0, };
#define BIT_DEPTH 16
#include "dsputil_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 8
#include "dsputil_template.c"
// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)
/* Specific zigzag scan for 248 idct. NOTE that unlike the
specification, we interleave the fields */
const uint8_t ff_zigzag248_direct[64] = {
0, 8, 1, 9, 16, 24, 2, 10,
17, 25, 32, 40, 48, 56, 33, 41,
18, 26, 3, 11, 4, 12, 19, 27,
34, 42, 49, 57, 50, 58, 35, 43,
20, 28, 5, 13, 6, 14, 21, 29,
36, 44, 51, 59, 52, 60, 37, 45,
22, 30, 7, 15, 23, 31, 38, 46,
53, 61, 54, 62, 39, 47, 55, 63,
};
/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
DECLARE_ALIGNED(16, uint16_t, ff_inv_zigzag_direct16)[64];
const uint8_t ff_alternate_horizontal_scan[64] = {
0, 1, 2, 3, 8, 9, 16, 17,
10, 11, 4, 5, 6, 7, 15, 14,
13, 12, 19, 18, 24, 25, 32, 33,
26, 27, 20, 21, 22, 23, 28, 29,
30, 31, 34, 35, 40, 41, 48, 49,
42, 43, 36, 37, 38, 39, 44, 45,
46, 47, 50, 51, 56, 57, 58, 59,
52, 53, 54, 55, 60, 61, 62, 63,
};
const uint8_t ff_alternate_vertical_scan[64] = {
0, 8, 16, 24, 1, 9, 2, 10,
17, 25, 32, 40, 48, 56, 57, 49,
41, 33, 26, 18, 3, 11, 4, 12,
19, 27, 34, 42, 50, 58, 35, 43,
51, 59, 20, 28, 5, 13, 6, 14,
21, 29, 36, 44, 52, 60, 37, 45,
53, 61, 22, 30, 7, 15, 23, 31,
38, 46, 54, 62, 39, 47, 55, 63,
};
/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64]={
0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};
static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
const uint8_t *src_scantable)
{
int i;
int end;
st->scantable= src_scantable;
for(i=0; i<64; i++){
int j;
j = src_scantable[i];
st->permutated[i] = permutation[j];
}
end=-1;
for(i=0; i<64; i++){
int j;
j = st->permutated[i];
if(j>end) end=j;
st->raster_end[i]= end;
}
}
av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
int idct_permutation_type)
{
int i;
switch(idct_permutation_type){
case FF_NO_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= i;
break;
case FF_LIBMPEG2_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
break;
case FF_SIMPLE_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= simple_mmx_permutation[i];
break;
case FF_TRANSPOSE_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= ((i&7)<<3) | (i>>3);
break;
case FF_PARTTRANS_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= (i&0x24) | ((i&3)<<3) | ((i>>3)&3);
break;
case FF_SSE2_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= (i&0x38) | idct_sse2_row_perm[i&7];
break;
default:
av_log(NULL, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
}
}
static int pix_sum_c(uint8_t * pix, int line_size)
{
int s, i, j;
s = 0;
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j += 8) {
s += pix[0];
s += pix[1];
s += pix[2];
s += pix[3];
s += pix[4];
s += pix[5];
s += pix[6];
s += pix[7];
pix += 8;
}
pix += line_size - 16;
}
return s;
}
static int pix_norm1_c(uint8_t * pix, int line_size)
{
int s, i, j;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j += 8) {
#if 0
s += sq[pix[0]];
s += sq[pix[1]];
s += sq[pix[2]];
s += sq[pix[3]];
s += sq[pix[4]];
s += sq[pix[5]];
s += sq[pix[6]];
s += sq[pix[7]];
#else
#if HAVE_FAST_64BIT
register uint64_t x=*(uint64_t*)pix;
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
s += sq[(x>>32)&0xff];
s += sq[(x>>40)&0xff];
s += sq[(x>>48)&0xff];
s += sq[(x>>56)&0xff];
#else
register uint32_t x=*(uint32_t*)pix;
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
x=*(uint32_t*)(pix+4);
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
#endif
#endif
pix += 8;
}
pix += line_size - 16;
}
return s;
}
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){
int i;
for(i=0; i+8<=w; i+=8){
dst[i+0]= av_bswap32(src[i+0]);
dst[i+1]= av_bswap32(src[i+1]);
dst[i+2]= av_bswap32(src[i+2]);
dst[i+3]= av_bswap32(src[i+3]);
dst[i+4]= av_bswap32(src[i+4]);
dst[i+5]= av_bswap32(src[i+5]);
dst[i+6]= av_bswap32(src[i+6]);
dst[i+7]= av_bswap32(src[i+7]);
}
for(;i<w; i++){
dst[i+0]= av_bswap32(src[i+0]);
}
}
static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
{
while (len--)
*dst++ = av_bswap16(*src++);
}
static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
s += sq[pix1[4] - pix2[4]];
s += sq[pix1[5] - pix2[5]];
s += sq[pix1[6] - pix2[6]];
s += sq[pix1[7] - pix2[7]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[ 0] - pix2[ 0]];
s += sq[pix1[ 1] - pix2[ 1]];
s += sq[pix1[ 2] - pix2[ 2]];
s += sq[pix1[ 3] - pix2[ 3]];
s += sq[pix1[ 4] - pix2[ 4]];
s += sq[pix1[ 5] - pix2[ 5]];
s += sq[pix1[ 6] - pix2[ 6]];
s += sq[pix1[ 7] - pix2[ 7]];
s += sq[pix1[ 8] - pix2[ 8]];
s += sq[pix1[ 9] - pix2[ 9]];
s += sq[pix1[10] - pix2[10]];
s += sq[pix1[11] - pix2[11]];
s += sq[pix1[12] - pix2[12]];
s += sq[pix1[13] - pix2[13]];
s += sq[pix1[14] - pix2[14]];
s += sq[pix1[15] - pix2[15]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static void diff_pixels_c(int16_t *av_restrict block, const uint8_t *s1,
const uint8_t *s2, int stride){
int i;
/* read the pixels */
for(i=0;i<8;i++) {
block[0] = s1[0] - s2[0];
block[1] = s1[1] - s2[1];
block[2] = s1[2] - s2[2];
block[3] = s1[3] - s2[3];
block[4] = s1[4] - s2[4];
block[5] = s1[5] - s2[5];
block[6] = s1[6] - s2[6];
block[7] = s1[7] - s2[7];
s1 += stride;
s2 += stride;
block += 8;
}
}
static void put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<8;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels[4] = av_clip_uint8(block[4]);
pixels[5] = av_clip_uint8(block[5]);
pixels[6] = av_clip_uint8(block[6]);
pixels[7] = av_clip_uint8(block[7]);
pixels += line_size;
block += 8;
}
}
static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels += line_size;
block += 8;
}
}
static void put_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels += line_size;
block += 8;
}
}
static void put_signed_pixels_clamped_c(const int16_t *block,
uint8_t *av_restrict pixels,
int line_size)
{
int i, j;
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++) {
if (*block < -128)
*pixels = 0;
else if (*block > 127)
*pixels = 255;
else
*pixels = (uint8_t)(*block + 128);
block++;
pixels++;
}
pixels += (line_size - 8);
}
}
static void add_pixels8_c(uint8_t *av_restrict pixels,
int16_t *block,
int line_size)
{
int i;
for(i=0;i<8;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels[4] += block[4];
pixels[5] += block[5];
pixels[6] += block[6];
pixels[7] += block[7];
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<8;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels[4] = av_clip_uint8(pixels[4] + block[4]);
pixels[5] = av_clip_uint8(pixels[5] + block[5]);
pixels[6] = av_clip_uint8(pixels[6] + block[6]);
pixels[7] = av_clip_uint8(pixels[7] + block[7]);
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels += line_size;
block += 8;
}
}
static int sum_abs_dctelem_c(int16_t *block)
{
int sum=0, i;
for(i=0; i<64; i++)
sum+= FFABS(block[i]);
return sum;
}
static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
{
int i;
for (i = 0; i < h; i++) {
memset(block, value, 16);
block += line_size;
}
}
static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
{
int i;
for (i = 0; i < h; i++) {
memset(block, value, 8);
block += line_size;
}
}
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
const int A=(16-x16)*(16-y16);
const int B=( x16)*(16-y16);
const int C=(16-x16)*( y16);
const int D=( x16)*( y16);
int i;
for(i=0; i<h; i++)
{
dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
dst+= stride;
src+= stride;
}
}
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
int y, vx, vy;
const int s= 1<<shift;
width--;
height--;
for(y=0; y<h; y++){
int x;
vx= ox;
vy= oy;
for(x=0; x<8; x++){ //XXX FIXME optimize
int src_x, src_y, frac_x, frac_y, index;
src_x= vx>>16;
src_y= vy>>16;
frac_x= src_x&(s-1);
frac_y= src_y&(s-1);
src_x>>=shift;
src_y>>=shift;
if((unsigned)src_x < width){
if((unsigned)src_y < height){
index= src_x + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*(s-frac_y)
+ ( src[index+stride ]*(s-frac_x)
+ src[index+stride+1]* frac_x )* frac_y
+ r)>>(shift*2);
}else{
index= src_x + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*s
+ r)>>(shift*2);
}
}else{
if((unsigned)src_y < height){
index= av_clip(src_x, 0, width) + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ src[index+stride ]* frac_y )*s
+ r)>>(shift*2);
}else{
index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= src[index ];
}
}
vx+= dxx;
vy+= dyx;
}
ox += dxy;
oy += dyy;
}
}
static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
switch(width){
case 2: put_pixels2_8_c (dst, src, stride, height); break;
case 4: put_pixels4_8_c (dst, src, stride, height); break;
case 8: put_pixels8_8_c (dst, src, stride, height); break;
case 16:put_pixels16_8_c(dst, src, stride, height); break;
}
}
static inline void put_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(2*src[j] + src[j+1] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(src[j] + 2*src[j+1] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(2*src[j] + src[j+stride] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(src[j] + 2*src[j+stride] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
switch(width){
case 2: avg_pixels2_8_c (dst, src, stride, height); break;
case 4: avg_pixels4_8_c (dst, src, stride, height); break;
case 8: avg_pixels8_8_c (dst, src, stride, height); break;
case 16:avg_pixels16_8_c(dst, src, stride, height); break;
}
}
static inline void avg_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(2*src[j] + src[j+1] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+1] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(2*src[j] + src[j+stride] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+stride] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=8;\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
const int src0= src[0*srcStride];\
const int src1= src[1*srcStride];\
const int src2= src[2*srcStride];\
const int src3= src[3*srcStride];\
const int src4= src[4*srcStride];\
const int src5= src[5*srcStride];\
const int src6= src[6*srcStride];\
const int src7= src[7*srcStride];\
const int src8= src[8*srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
dst++;\
src++;\
}\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
\
for(i=0; i<h; i++)\
{\
OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
const int w=16;\
for(i=0; i<w; i++)\
{\
const int src0= src[0*srcStride];\
const int src1= src[1*srcStride];\
const int src2= src[2*srcStride];\
const int src3= src[3*srcStride];\
const int src4= src[4*srcStride];\
const int src5= src[5*srcStride];\
const int src6= src[6*srcStride];\
const int src7= src[7*srcStride];\
const int src8= src[8*srcStride];\
const int src9= src[9*srcStride];\
const int src10= src[10*srcStride];\
const int src11= src[11*srcStride];\
const int src12= src[12*srcStride];\
const int src13= src[13*srcStride];\
const int src14= src[14*srcStride];\
const int src15= src[15*srcStride];\
const int src16= src[16*srcStride];\
OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
dst++;\
src++;\
}\
}\
\
static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
OPNAME ## pixels8_l2_8(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t half[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
copy_block9(full, src, 16, stride, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t half[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
OPNAME ## pixels8_l2_8(dst, full+16, half, stride, 16, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full+1, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full+1, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[72];\
uint8_t halfHV[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[72];\
uint8_t halfHV[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full+1, 8, 8, 16, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[72];\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
OPNAME ## pixels16_l2_8(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t half[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
copy_block17(full, src, 24, stride, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t half[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
OPNAME ## pixels16_l2_8(dst, full+24, half, stride, 24, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full+1, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full+1, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[272];\
uint8_t halfHV[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[272];\
uint8_t halfHV[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full+1, 16, 16, 24, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[272];\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels8_8_c(dst, src, stride, 8);
}
void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels8_8_c(dst, src, stride, 8);
}
void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels16_8_c(dst, src, stride, 16);
}
void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels16_8_c(dst, src, stride, 16);
}
#define put_qpel8_mc00_c ff_put_pixels8x8_c
#define avg_qpel8_mc00_c ff_avg_pixels8x8_c
#define put_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_qpel16_mc00_c ff_avg_pixels16x16_c
#define put_no_rnd_qpel8_mc00_c ff_put_pixels8x8_c
#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<h; i++){
dst[0]= cm[(9*(src[0] + src[1]) - (src[-1] + src[2]) + 8)>>4];
dst[1]= cm[(9*(src[1] + src[2]) - (src[ 0] + src[3]) + 8)>>4];
dst[2]= cm[(9*(src[2] + src[3]) - (src[ 1] + src[4]) + 8)>>4];
dst[3]= cm[(9*(src[3] + src[4]) - (src[ 2] + src[5]) + 8)>>4];
dst[4]= cm[(9*(src[4] + src[5]) - (src[ 3] + src[6]) + 8)>>4];
dst[5]= cm[(9*(src[5] + src[6]) - (src[ 4] + src[7]) + 8)>>4];
dst[6]= cm[(9*(src[6] + src[7]) - (src[ 5] + src[8]) + 8)>>4];
dst[7]= cm[(9*(src[7] + src[8]) - (src[ 6] + src[9]) + 8)>>4];
dst+=dstStride;
src+=srcStride;
}
}
#if CONFIG_RV40_DECODER
void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels16_xy2_8_c(dst, src, stride, 16);
}
void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels16_xy2_8_c(dst, src, stride, 16);
}
void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels8_xy2_8_c(dst, src, stride, 8);
}
void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels8_xy2_8_c(dst, src, stride, 8);
}
#endif /* CONFIG_RV40_DECODER */
#if CONFIG_DIRAC_DECODER
#define DIRAC_MC(OPNAME)\
void ff_ ## OPNAME ## _dirac_pixels8_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels8_8_c(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_8_c(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_8_c(dst , src[0] , stride, h);\
OPNAME ## _pixels16_8_c(dst+16, src[0]+16, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels8_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels8_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l2_8(dst , src[0] , src[1] , stride, stride, stride, h);\
OPNAME ## _pixels16_l2_8(dst+16, src[0]+16, src[1]+16, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels8_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels8_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l4_8(dst , src[0] , src[1] , src[2] , src[3] , stride, stride, stride, stride, stride, h);\
OPNAME ## _pixels16_l4_8(dst+16, src[0]+16, src[1]+16, src[2]+16, src[3]+16, stride, stride, stride, stride, stride, h);\
}
DIRAC_MC(put)
DIRAC_MC(avg)
#endif
static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<w; i++){
const int src_1= src[ -srcStride];
const int src0 = src[0 ];
const int src1 = src[ srcStride];
const int src2 = src[2*srcStride];
const int src3 = src[3*srcStride];
const int src4 = src[4*srcStride];
const int src5 = src[5*srcStride];
const int src6 = src[6*srcStride];
const int src7 = src[7*srcStride];
const int src8 = src[8*srcStride];
const int src9 = src[9*srcStride];
dst[0*dstStride]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
dst[1*dstStride]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
dst[2*dstStride]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
dst[3*dstStride]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
dst[4*dstStride]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
dst[5*dstStride]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
dst[6*dstStride]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
dst[7*dstStride]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
src++;
dst++;
}
}
static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t half[64];
wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
}
static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t half[64];
wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
put_pixels8_l2_8(dst, src+1, half, stride, stride, 8, 8);
}
static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t halfH[88];
uint8_t halfV[64];
uint8_t halfHV[64];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}
static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t halfH[88];
uint8_t halfV[64];
uint8_t halfHV[64];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}
static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t halfH[88];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}
static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
int x;
const int strength= ff_h263_loop_filter_strength[qscale];
for(x=0; x<8; x++){
int d1, d2, ad1;
int p0= src[x-2*stride];
int p1= src[x-1*stride];
int p2= src[x+0*stride];
int p3= src[x+1*stride];
int d = (p0 - p3 + 4*(p2 - p1)) / 8;
if (d<-2*strength) d1= 0;
else if(d<- strength) d1=-2*strength - d;
else if(d< strength) d1= d;
else if(d< 2*strength) d1= 2*strength - d;
else d1= 0;
p1 += d1;
p2 -= d1;
if(p1&256) p1= ~(p1>>31);
if(p2&256) p2= ~(p2>>31);
src[x-1*stride] = p1;
src[x+0*stride] = p2;
ad1= FFABS(d1)>>1;
d2= av_clip((p0-p3)/4, -ad1, ad1);
src[x-2*stride] = p0 - d2;
src[x+ stride] = p3 + d2;
}
}
}
static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
int y;
const int strength= ff_h263_loop_filter_strength[qscale];
for(y=0; y<8; y++){
int d1, d2, ad1;
int p0= src[y*stride-2];
int p1= src[y*stride-1];
int p2= src[y*stride+0];
int p3= src[y*stride+1];
int d = (p0 - p3 + 4*(p2 - p1)) / 8;
if (d<-2*strength) d1= 0;
else if(d<- strength) d1=-2*strength - d;
else if(d< strength) d1= d;
else if(d< 2*strength) d1= 2*strength - d;
else d1= 0;
p1 += d1;
p2 -= d1;
if(p1&256) p1= ~(p1>>31);
if(p2&256) p2= ~(p2>>31);
src[y*stride-1] = p1;
src[y*stride+0] = p2;
ad1= FFABS(d1)>>1;
d2= av_clip((p0-p3)/4, -ad1, ad1);
src[y*stride-2] = p0 - d2;
src[y*stride+1] = p3 + d2;
}
}
}
static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - pix2[0]);
s += abs(pix1[1] - pix2[1]);
s += abs(pix1[2] - pix2[2]);
s += abs(pix1[3] - pix2[3]);
s += abs(pix1[4] - pix2[4]);
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
s += abs(pix1[8] - pix2[8]);
s += abs(pix1[9] - pix2[9]);
s += abs(pix1[10] - pix2[10]);
s += abs(pix1[11] - pix2[11]);
s += abs(pix1[12] - pix2[12]);
s += abs(pix1[13] - pix2[13]);
s += abs(pix1[14] - pix2[14]);
s += abs(pix1[15] - pix2[15]);
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - pix2[0]);
s += abs(pix1[1] - pix2[1]);
s += abs(pix1[2] - pix2[2]);
s += abs(pix1[3] - pix2[3]);
s += abs(pix1[4] - pix2[4]);
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
MpegEncContext *c = v;
int score1=0;
int score2=0;
int x,y;
for(y=0; y<h; y++){
for(x=0; x<16; x++){
score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
}
if(y+1<h){
for(x=0; x<15; x++){
score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
-FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
s1+= stride;
s2+= stride;
}
if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
else return score1 + FFABS(score2)*8;
}
static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
MpegEncContext *c = v;
int score1=0;
int score2=0;
int x,y;
for(y=0; y<h; y++){
for(x=0; x<8; x++){
score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
}
if(y+1<h){
for(x=0; x<7; x++){
score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
-FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
s1+= stride;
s2+= stride;
}
if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
else return score1 + FFABS(score2)*8;
}
static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
int i;
unsigned int sum=0;
for(i=0; i<8*8; i++){
int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT));
int w= weight[i];
b>>= RECON_SHIFT;
av_assert2(-512<b && b<512);
sum += (w*b)*(w*b)>>4;
}
return sum>>2;
}
static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){
int i;
for(i=0; i<8*8; i++){
rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
}
}
static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h){
return 0;
}
void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
int i;
memset(cmp, 0, sizeof(void*)*6);
for(i=0; i<6; i++){
switch(type&0xFF){
case FF_CMP_SAD:
cmp[i]= c->sad[i];
break;
case FF_CMP_SATD:
cmp[i]= c->hadamard8_diff[i];
break;
case FF_CMP_SSE:
cmp[i]= c->sse[i];
break;
case FF_CMP_DCT:
cmp[i]= c->dct_sad[i];
break;
case FF_CMP_DCT264:
cmp[i]= c->dct264_sad[i];
break;
case FF_CMP_DCTMAX:
cmp[i]= c->dct_max[i];
break;
case FF_CMP_PSNR:
cmp[i]= c->quant_psnr[i];
break;
case FF_CMP_BIT:
cmp[i]= c->bit[i];
break;
case FF_CMP_RD:
cmp[i]= c->rd[i];
break;
case FF_CMP_VSAD:
cmp[i]= c->vsad[i];
break;
case FF_CMP_VSSE:
cmp[i]= c->vsse[i];
break;
case FF_CMP_ZERO:
cmp[i]= zero_cmp;
break;
case FF_CMP_NSSE:
cmp[i]= c->nsse[i];
break;
#if CONFIG_DWT
case FF_CMP_W53:
cmp[i]= c->w53[i];
break;
case FF_CMP_W97:
cmp[i]= c->w97[i];
break;
#endif
default:
av_log(NULL, AV_LOG_ERROR,"internal error in cmp function selection\n");
}
}
}
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
long i;
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
long a = *(long*)(src+i);
long b = *(long*)(dst+i);
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
}
for(; i<w; i++)
dst[i+0] += src[i+0];
}
static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w){
long i;
#if !HAVE_FAST_UNALIGNED
if((long)src2 & (sizeof(long)-1)){
for(i=0; i+7<w; i+=8){
dst[i+0] = src1[i+0]-src2[i+0];
dst[i+1] = src1[i+1]-src2[i+1];
dst[i+2] = src1[i+2]-src2[i+2];
dst[i+3] = src1[i+3]-src2[i+3];
dst[i+4] = src1[i+4]-src2[i+4];
dst[i+5] = src1[i+5]-src2[i+5];
dst[i+6] = src1[i+6]-src2[i+6];
dst[i+7] = src1[i+7]-src2[i+7];
}
}else
#endif
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
long a = *(long*)(src1+i);
long b = *(long*)(src2+i);
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
}
for(; i<w; i++)
dst[i+0] = src1[i+0]-src2[i+0];
}
static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *diff, int w, int *left, int *left_top){
int i;
uint8_t l, lt;
l= *left;
lt= *left_top;
for(i=0; i<w; i++){
l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
lt= src1[i];
dst[i]= l;
}
*left= l;
*left_top= lt;
}
static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
int i;
uint8_t l, lt;
l= *left;
lt= *left_top;
for(i=0; i<w; i++){
const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
lt= src1[i];
l= src2[i];
dst[i]= l - pred;
}
*left= l;
*left_top= lt;
}
static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src, int w, int acc){
int i;
for(i=0; i<w-1; i++){
acc+= src[i];
dst[i]= acc;
i++;
acc+= src[i];
dst[i]= acc;
}
for(; i<w; i++){
acc+= src[i];
dst[i]= acc;
}
return acc;
}
#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif
static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
int i;
int r,g,b,a;
r= *red;
g= *green;
b= *blue;
a= *alpha;
for(i=0; i<w; i++){
b+= src[4*i+B];
g+= src[4*i+G];
r+= src[4*i+R];
a+= src[4*i+A];
dst[4*i+B]= b;
dst[4*i+G]= g;
dst[4*i+R]= r;
dst[4*i+A]= a;
}
*red= r;
*green= g;
*blue= b;
*alpha= a;
}
#undef B
#undef G
#undef R
#undef A
#define BUTTERFLY2(o1,o2,i1,i2) \
o1= (i1)+(i2);\
o2= (i1)-(i2);
#define BUTTERFLY1(x,y) \
{\
int a,b;\
a= x;\
b= y;\
x= a+b;\
y= a-b;\
}
#define BUTTERFLYA(x,y) (FFABS((x)+(y)) + FFABS((x)-(y)))
static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
int i;
int temp[64];
int sum=0;
av_assert2(h==8);
for(i=0; i<8; i++){
//FIXME try pointer walks
BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-dst[stride*i+0],src[stride*i+1]-dst[stride*i+1]);
BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-dst[stride*i+2],src[stride*i+3]-dst[stride*i+3]);
BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-dst[stride*i+4],src[stride*i+5]-dst[stride*i+5]);
BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-dst[stride*i+6],src[stride*i+7]-dst[stride*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
}
for(i=0; i<8; i++){
BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
sum +=
BUTTERFLYA(temp[8*0+i], temp[8*4+i])
+BUTTERFLYA(temp[8*1+i], temp[8*5+i])
+BUTTERFLYA(temp[8*2+i], temp[8*6+i])
+BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
}
return sum;
}
static int hadamard8_intra8x8_c(/*MpegEncContext*/ void *s, uint8_t *src, uint8_t *dummy, int stride, int h){
int i;
int temp[64];
int sum=0;
av_assert2(h==8);
for(i=0; i<8; i++){
//FIXME try pointer walks
BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0],src[stride*i+1]);
BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2],src[stride*i+3]);
BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4],src[stride*i+5]);
BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6],src[stride*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
}
for(i=0; i<8; i++){
BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
sum +=
BUTTERFLYA(temp[8*0+i], temp[8*4+i])
+BUTTERFLYA(temp[8*1+i], temp[8*5+i])
+BUTTERFLYA(temp[8*2+i], temp[8*6+i])
+BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
}
sum -= FFABS(temp[8*0] + temp[8*4]); // -mean
return sum;
}
static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
av_assert2(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
return s->dsp.sum_abs_dctelem(temp);
}
#if CONFIG_GPL
#define DCT8_1D {\
const int s07 = SRC(0) + SRC(7);\
const int s16 = SRC(1) + SRC(6);\
const int s25 = SRC(2) + SRC(5);\
const int s34 = SRC(3) + SRC(4);\
const int a0 = s07 + s34;\
const int a1 = s16 + s25;\
const int a2 = s07 - s34;\
const int a3 = s16 - s25;\
const int d07 = SRC(0) - SRC(7);\
const int d16 = SRC(1) - SRC(6);\
const int d25 = SRC(2) - SRC(5);\
const int d34 = SRC(3) - SRC(4);\
const int a4 = d16 + d25 + (d07 + (d07>>1));\
const int a5 = d07 - d34 - (d25 + (d25>>1));\
const int a6 = d07 + d34 - (d16 + (d16>>1));\
const int a7 = d16 - d25 + (d34 + (d34>>1));\
DST(0, a0 + a1 ) ;\
DST(1, a4 + (a7>>2)) ;\
DST(2, a2 + (a3>>1)) ;\
DST(3, a5 + (a6>>2)) ;\
DST(4, a0 - a1 ) ;\
DST(5, a6 - (a5>>2)) ;\
DST(6, (a2>>1) - a3 ) ;\
DST(7, (a4>>2) - a7 ) ;\
}
static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
int16_t dct[8][8];
int i;
int sum=0;
s->dsp.diff_pixels(dct[0], src1, src2, stride);
#define SRC(x) dct[i][x]
#define DST(x,v) dct[i][x]= v
for( i = 0; i < 8; i++ )
DCT8_1D
#undef SRC
#undef DST
#define SRC(x) dct[x][i]
#define DST(x,v) sum += FFABS(v)
for( i = 0; i < 8; i++ )
DCT8_1D
#undef SRC
#undef DST
return sum;
}
#endif
static int dct_max8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int sum=0, i;
av_assert2(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
for(i=0; i<64; i++)
sum= FFMAX(sum, FFABS(temp[i]));
return sum;
}
static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
LOCAL_ALIGNED_16(int16_t, temp, [64*2]);
int16_t * const bak = temp+64;
int sum=0, i;
av_assert2(h==8);
s->mb_intra=0;
s->dsp.diff_pixels(temp, src1, src2, stride);
memcpy(bak, temp, 64*sizeof(int16_t));
s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
s->dct_unquantize_inter(s, temp, 0, s->qscale);
ff_simple_idct_8(temp); //FIXME
for(i=0; i<64; i++)
sum+= (temp[i]-bak[i])*(temp[i]-bak[i]);
return sum;
}
static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
const uint8_t *scantable= s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
int i, last, run, bits, level, distortion, start_i;
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
av_assert2(h==8);
copy_block8(lsrc1, src1, 8, stride, 8);
copy_block8(lsrc2, src2, 8, stride, 8);
s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
bits=0;
if (s->mb_intra) {
start_i = 1;
length = s->intra_ac_vlc_length;
last_length= s->intra_ac_vlc_last_length;
bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
} else {
start_i = 0;
length = s->inter_ac_vlc_length;
last_length= s->inter_ac_vlc_last_length;
}
if(last>=start_i){
run=0;
for(i=start_i; i<last; i++){
int j= scantable[i];
level= temp[j];
if(level){
level+=64;
if((level&(~127)) == 0){
bits+= length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
run=0;
}else
run++;
}
i= scantable[last];
level= temp[i] + 64;
av_assert2(level - 64);
if((level&(~127)) == 0){
bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
}
if(last>=0){
if(s->mb_intra)
s->dct_unquantize_intra(s, temp, 0, s->qscale);
else
s->dct_unquantize_inter(s, temp, 0, s->qscale);
}
s->dsp.idct_add(lsrc2, 8, temp);
distortion= s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
return distortion + ((bits*s->qscale*s->qscale*109 + 64)>>7);
}
static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
const uint8_t *scantable= s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int i, last, run, bits, level, start_i;
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
av_assert2(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
bits=0;
if (s->mb_intra) {
start_i = 1;
length = s->intra_ac_vlc_length;
last_length= s->intra_ac_vlc_last_length;
bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
} else {
start_i = 0;
length = s->inter_ac_vlc_length;
last_length= s->inter_ac_vlc_last_length;
}
if(last>=start_i){
run=0;
for(i=start_i; i<last; i++){
int j= scantable[i];
level= temp[j];
if(level){
level+=64;
if((level&(~127)) == 0){
bits+= length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
run=0;
}else
run++;
}
i= scantable[last];
level= temp[i] + 64;
av_assert2(level - 64);
if((level&(~127)) == 0){
bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
}
return bits;
}
#define VSAD_INTRA(size) \
static int vsad_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
int score=0; \
int x,y; \
\
for(y=1; y<h; y++){ \
for(x=0; x<size; x+=4){ \
score+= FFABS(s[x ] - s[x +stride]) + FFABS(s[x+1] - s[x+1+stride]) \
+FFABS(s[x+2] - s[x+2+stride]) + FFABS(s[x+3] - s[x+3+stride]); \
} \
s+= stride; \
} \
\
return score; \
}
VSAD_INTRA(8)
VSAD_INTRA(16)
static int vsad16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
int score=0;
int x,y;
for(y=1; y<h; y++){
for(x=0; x<16; x++){
score+= FFABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
}
s1+= stride;
s2+= stride;
}
return score;
}
#define SQ(a) ((a)*(a))
#define VSSE_INTRA(size) \
static int vsse_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
int score=0; \
int x,y; \
\
for(y=1; y<h; y++){ \
for(x=0; x<size; x+=4){ \
score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) \
+SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); \
} \
s+= stride; \
} \
\
return score; \
}
VSSE_INTRA(8)
VSSE_INTRA(16)
static int vsse16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
int score=0;
int x,y;
for(y=1; y<h; y++){
for(x=0; x<16; x++){
score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
}
s1+= stride;
s2+= stride;
}
return score;
}
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
int size){
int score=0;
int i;
for(i=0; i<size; i++)
score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
return score;
}
#define WRAPPER8_16_SQ(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
int score=0;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
if(h==16){\
dst += 8*stride;\
src += 8*stride;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
}\
return score;\
}
WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)
static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
uint32_t maxi, uint32_t maxisign)
{
if(a > mini) return mini;
else if((a^(1U<<31)) > maxisign) return maxi;
else return a;
}
static void vector_clipf_c_opposite_sign(float *dst, const float *src, float *min, float *max, int len){
int i;
uint32_t mini = *(uint32_t*)min;
uint32_t maxi = *(uint32_t*)max;
uint32_t maxisign = maxi ^ (1U<<31);
uint32_t *dsti = (uint32_t*)dst;
const uint32_t *srci = (const uint32_t*)src;
for(i=0; i<len; i+=8) {
dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
}
}
static void vector_clipf_c(float *dst, const float *src, float min, float max, int len){
int i;
if(min < 0 && max > 0) {
vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
} else {
for(i=0; i < len; i+=8) {
dst[i ] = av_clipf(src[i ], min, max);
dst[i + 1] = av_clipf(src[i + 1], min, max);
dst[i + 2] = av_clipf(src[i + 2], min, max);
dst[i + 3] = av_clipf(src[i + 3], min, max);
dst[i + 4] = av_clipf(src[i + 4], min, max);
dst[i + 5] = av_clipf(src[i + 5], min, max);
dst[i + 6] = av_clipf(src[i + 6], min, max);
dst[i + 7] = av_clipf(src[i + 7], min, max);
}
}
}
static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order)
{
int res = 0;
while (order--)
res += *v1++ * *v2++;
return res;
}
static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
{
int res = 0;
while (order--) {
res += *v1 * *v2++;
*v1++ += mul * *v3++;
}
return res;
}
static void apply_window_int16_c(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len)
{
int i;
int len2 = len >> 1;
for (i = 0; i < len2; i++) {
int16_t w = window[i];
output[i] = (MUL16(input[i], w) + (1 << 14)) >> 15;
output[len-i-1] = (MUL16(input[len-i-1], w) + (1 << 14)) >> 15;
}
}
static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
int32_t max, unsigned int len)
{
do {
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
len -= 8;
} while (len > 0);
}
static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct (block);
put_pixels_clamped_c(block, dest, line_size);
}
static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct (block);
add_pixels_clamped_c(block, dest, line_size);
}
static void ff_jref_idct4_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct4 (block);
put_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct4_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct4 (block);
add_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct2_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct2 (block);
put_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct2_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct2 (block);
add_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct1_put(uint8_t *dest, int line_size, int16_t *block)
{
dest[0] = av_clip_uint8((block[0] + 4)>>3);
}
static void ff_jref_idct1_add(uint8_t *dest, int line_size, int16_t *block)
{
dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
}
/* init static data */
av_cold void ff_dsputil_static_init(void)
{
int i;
for(i=0;i<512;i++) {
ff_squareTbl[i] = (i - 256) * (i - 256);
}
for(i=0; i<64; i++) ff_inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
}
int ff_check_alignment(void){
static int did_fail=0;
LOCAL_ALIGNED_16(int, aligned, [4]);
if((intptr_t)aligned & 15){
if(!did_fail){
#if HAVE_MMX || HAVE_ALTIVEC
av_log(NULL, AV_LOG_ERROR,
"Compiler did not align stack variables. Libavcodec has been miscompiled\n"
"and may be very slow or crash. This is not a bug in libavcodec,\n"
"but in the compiler. You may try recompiling using gcc >= 4.2.\n"
"Do not report crashes to FFmpeg developers.\n");
#endif
did_fail=1;
}
return -1;
}
return 0;
}
av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
ff_check_alignment();
#if CONFIG_ENCODERS
if (avctx->bits_per_raw_sample == 10) {
c->fdct = ff_jpeg_fdct_islow_10;
c->fdct248 = ff_fdct248_islow_10;
} else {
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = ff_fdct_ifast;
c->fdct248 = ff_fdct_ifast248;
}
else if(avctx->dct_algo==FF_DCT_FAAN) {
c->fdct = ff_faandct;
c->fdct248 = ff_faandct248;
}
else {
c->fdct = ff_jpeg_fdct_islow_8; //slow/accurate/default
c->fdct248 = ff_fdct248_islow_8;
}
}
#endif //CONFIG_ENCODERS
if(avctx->lowres==1){
c->idct_put= ff_jref_idct4_put;
c->idct_add= ff_jref_idct4_add;
c->idct = ff_j_rev_dct4;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==2){
c->idct_put= ff_jref_idct2_put;
c->idct_add= ff_jref_idct2_add;
c->idct = ff_j_rev_dct2;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==3){
c->idct_put= ff_jref_idct1_put;
c->idct_add= ff_jref_idct1_add;
c->idct = ff_j_rev_dct1;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{
if (avctx->bits_per_raw_sample == 10) {
c->idct_put = ff_simple_idct_put_10;
c->idct_add = ff_simple_idct_add_10;
c->idct = ff_simple_idct_10;
c->idct_permutation_type = FF_NO_IDCT_PERM;
} else {
if(avctx->idct_algo==FF_IDCT_INT){
c->idct_put= jref_idct_put;
c->idct_add= jref_idct_add;
c->idct = ff_j_rev_dct;
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
}else if(avctx->idct_algo==FF_IDCT_FAAN){
c->idct_put= ff_faanidct_put;
c->idct_add= ff_faanidct_add;
c->idct = ff_faanidct;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{ //accurate/default
c->idct_put = ff_simple_idct_put_8;
c->idct_add = ff_simple_idct_add_8;
c->idct = ff_simple_idct_8;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}
}
}
c->diff_pixels = diff_pixels_c;
c->put_pixels_clamped = put_pixels_clamped_c;
c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
c->add_pixels_clamped = add_pixels_clamped_c;
c->sum_abs_dctelem = sum_abs_dctelem_c;
c->gmc1 = gmc1_c;
c->gmc = ff_gmc_c;
c->pix_sum = pix_sum_c;
c->pix_norm1 = pix_norm1_c;
c->fill_block_tab[0] = fill_block16_c;
c->fill_block_tab[1] = fill_block8_c;
/* TODO [0] 16 [1] 8 */
c->pix_abs[0][0] = pix_abs16_c;
c->pix_abs[0][1] = pix_abs16_x2_c;
c->pix_abs[0][2] = pix_abs16_y2_c;
c->pix_abs[0][3] = pix_abs16_xy2_c;
c->pix_abs[1][0] = pix_abs8_c;
c->pix_abs[1][1] = pix_abs8_x2_c;
c->pix_abs[1][2] = pix_abs8_y2_c;
c->pix_abs[1][3] = pix_abs8_xy2_c;
c->put_tpel_pixels_tab[ 0] = put_tpel_pixels_mc00_c;
c->put_tpel_pixels_tab[ 1] = put_tpel_pixels_mc10_c;
c->put_tpel_pixels_tab[ 2] = put_tpel_pixels_mc20_c;
c->put_tpel_pixels_tab[ 4] = put_tpel_pixels_mc01_c;
c->put_tpel_pixels_tab[ 5] = put_tpel_pixels_mc11_c;
c->put_tpel_pixels_tab[ 6] = put_tpel_pixels_mc21_c;
c->put_tpel_pixels_tab[ 8] = put_tpel_pixels_mc02_c;
c->put_tpel_pixels_tab[ 9] = put_tpel_pixels_mc12_c;
c->put_tpel_pixels_tab[10] = put_tpel_pixels_mc22_c;
c->avg_tpel_pixels_tab[ 0] = avg_tpel_pixels_mc00_c;
c->avg_tpel_pixels_tab[ 1] = avg_tpel_pixels_mc10_c;
c->avg_tpel_pixels_tab[ 2] = avg_tpel_pixels_mc20_c;
c->avg_tpel_pixels_tab[ 4] = avg_tpel_pixels_mc01_c;
c->avg_tpel_pixels_tab[ 5] = avg_tpel_pixels_mc11_c;
c->avg_tpel_pixels_tab[ 6] = avg_tpel_pixels_mc21_c;
c->avg_tpel_pixels_tab[ 8] = avg_tpel_pixels_mc02_c;
c->avg_tpel_pixels_tab[ 9] = avg_tpel_pixels_mc12_c;
c->avg_tpel_pixels_tab[10] = avg_tpel_pixels_mc22_c;
#define dspfunc(PFX, IDX, NUM) \
c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
dspfunc(put_qpel, 0, 16);
dspfunc(put_no_rnd_qpel, 0, 16);
dspfunc(avg_qpel, 0, 16);
/* dspfunc(avg_no_rnd_qpel, 0, 16); */
dspfunc(put_qpel, 1, 8);
dspfunc(put_no_rnd_qpel, 1, 8);
dspfunc(avg_qpel, 1, 8);
/* dspfunc(avg_no_rnd_qpel, 1, 8); */
#undef dspfunc
c->put_mspel_pixels_tab[0]= ff_put_pixels8x8_c;
c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
#define SET_CMP_FUNC(name) \
c->name[0]= name ## 16_c;\
c->name[1]= name ## 8x8_c;
SET_CMP_FUNC(hadamard8_diff)
c->hadamard8_diff[4]= hadamard8_intra16_c;
c->hadamard8_diff[5]= hadamard8_intra8x8_c;
SET_CMP_FUNC(dct_sad)
SET_CMP_FUNC(dct_max)
#if CONFIG_GPL
SET_CMP_FUNC(dct264_sad)
#endif
c->sad[0]= pix_abs16_c;
c->sad[1]= pix_abs8_c;
c->sse[0]= sse16_c;
c->sse[1]= sse8_c;
c->sse[2]= sse4_c;
SET_CMP_FUNC(quant_psnr)
SET_CMP_FUNC(rd)
SET_CMP_FUNC(bit)
c->vsad[0]= vsad16_c;
c->vsad[4]= vsad_intra16_c;
c->vsad[5]= vsad_intra8_c;
c->vsse[0]= vsse16_c;
c->vsse[4]= vsse_intra16_c;
c->vsse[5]= vsse_intra8_c;
c->nsse[0]= nsse16_c;
c->nsse[1]= nsse8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
ff_dsputil_init_dwt(c);
#endif
c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
c->add_bytes= add_bytes_c;
c->diff_bytes= diff_bytes_c;
c->add_hfyu_median_prediction= add_hfyu_median_prediction_c;
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c;
c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
c->bswap_buf= bswap_buf;
c->bswap16_buf = bswap16_buf;
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
c->h263_h_loop_filter= h263_h_loop_filter_c;
c->h263_v_loop_filter= h263_v_loop_filter_c;
}
c->try_8x8basis= try_8x8basis_c;
c->add_8x8basis= add_8x8basis_c;
c->vector_clipf = vector_clipf_c;
c->scalarproduct_int16 = scalarproduct_int16_c;
c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
c->apply_window_int16 = apply_window_int16_c;
c->vector_clip_int32 = vector_clip_int32_c;
c->shrink[0]= av_image_copy_plane;
c->shrink[1]= ff_shrink22;
c->shrink[2]= ff_shrink44;
c->shrink[3]= ff_shrink88;
c->add_pixels8 = add_pixels8_c;
#undef FUNC
#undef FUNCC
#define FUNC(f, depth) f ## _ ## depth
#define FUNCC(f, depth) f ## _ ## depth ## _c
c->draw_edges = FUNCC(draw_edges, 8);
c->clear_block = FUNCC(clear_block, 8);
c->clear_blocks = FUNCC(clear_blocks, 8);
#define BIT_DEPTH_FUNCS(depth) \
c->get_pixels = FUNCC(get_pixels, depth);
switch (avctx->bits_per_raw_sample) {
case 9:
case 10:
case 12:
case 14:
BIT_DEPTH_FUNCS(16);
break;
default:
if(avctx->bits_per_raw_sample<=8 || avctx->codec_type != AVMEDIA_TYPE_VIDEO) {
BIT_DEPTH_FUNCS(8);
}
break;
}
if (ARCH_ALPHA)
ff_dsputil_init_alpha(c, avctx);
if (ARCH_ARM)
ff_dsputil_init_arm(c, avctx);
if (ARCH_BFIN)
ff_dsputil_init_bfin(c, avctx);
if (ARCH_PPC)
ff_dsputil_init_ppc(c, avctx);
if (ARCH_SH4)
ff_dsputil_init_sh4(c, avctx);
if (HAVE_VIS)
ff_dsputil_init_vis(c, avctx);
if (ARCH_X86)
ff_dsputil_init_x86(c, avctx);
ff_init_scantable_permutation(c->idct_permutation,
c->idct_permutation_type);
}
av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
ff_dsputil_init(c, avctx);
}
av_cold void avpriv_dsputil_init(DSPContext *c, AVCodecContext *avctx)
{
ff_dsputil_init(c, avctx);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5814_0 |
crossvul-cpp_data_bad_5752_0 | /*
* linux/ipc/msg.c
* Copyright (C) 1992 Krishna Balasubramanian
*
* Removed all the remaining kerneld mess
* Catch the -EFAULT stuff properly
* Use GFP_KERNEL for messages as in 1.2
* Fixed up the unchecked user space derefs
* Copyright (C) 1998 Alan Cox & Andi Kleen
*
* /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*
* mostly rewritten, threaded and wake-one semantics added
* MSGMAX limit removed, sysctl's added
* (c) 1999 Manfred Spraul <manfred@colorfullife.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*/
#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
* one msg_receiver structure for each sleeping receiver:
*/
struct msg_receiver {
struct list_head r_list;
struct task_struct *r_tsk;
int r_mode;
long r_msgtype;
long r_maxsize;
struct msg_msg *volatile r_msg;
};
/* one msg_sender for each sleeping sender */
struct msg_sender {
struct list_head list;
struct task_struct *tsk;
};
#define SEARCH_ANY 1
#define SEARCH_EQUAL 2
#define SEARCH_NOTEQUAL 3
#define SEARCH_LESSEQUAL 4
#define SEARCH_NUMBER 5
#define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
/*
* Scale msgmni with the available lowmem size: the memory dedicated to msg
* queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
* Also take into account the number of nsproxies created so far.
* This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
*/
void recompute_msgmni(struct ipc_namespace *ns)
{
struct sysinfo i;
unsigned long allowed;
int nb_ns;
si_meminfo(&i);
allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
/ MSGMNB;
nb_ns = atomic_read(&nr_ipc_ns);
allowed /= nb_ns;
if (allowed < MSGMNI) {
ns->msg_ctlmni = MSGMNI;
return;
}
if (allowed > IPCMNI / nb_ns) {
ns->msg_ctlmni = IPCMNI / nb_ns;
return;
}
ns->msg_ctlmni = allowed;
}
void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
recompute_msgmni(ns);
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
void __init msg_init(void)
{
msg_init_ns(&init_ipc_ns);
printk(KERN_INFO "msgmni has been set to %d\n",
init_ipc_ns.msg_ctlmni);
ipc_init_proc_interface("sysvipc/msg",
" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
IPC_MSG_IDS, sysvipc_msg_proc_show);
}
/*
* msg_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
if (IS_ERR(ipcp))
return (struct msg_queue *)ipcp;
return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);
if (IS_ERR(ipcp))
return (struct msg_queue *)ipcp;
return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
ipc_rmid(&msg_ids(ns), &s->q_perm);
}
/**
* newque - Create a new msg queue
* @ns: namespace
* @params: ptr to the structure that contains the key and msgflg
*
* Called with msg_ids.rw_mutex held (writer)
*/
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq);
return retval;
}
/*
* ipc_addid() locks msq
*/
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
security_msg_queue_free(msq);
ipc_rcu_putref(msq);
return id;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
msg_unlock(msq);
return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
mss->tsk = current;
current->state = TASK_INTERRUPTIBLE;
list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
if (mss->list.next != NULL)
list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
struct list_head *tmp;
tmp = h->next;
while (tmp != h) {
struct msg_sender *mss;
mss = list_entry(tmp, struct msg_sender, list);
tmp = tmp->next;
if (kill)
mss->list.next = NULL;
wake_up_process(mss->tsk);
}
}
static void expunge_all(struct msg_queue *msq, int res)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(res);
}
}
/*
* freeque() wakes up waiters on the sender and receiver waiting queue,
* removes the message queue from message queue ID IDR, and cleans up all the
* messages associated with this queue.
*
* msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
* before freeque() is called. msg_ids.rw_mutex remains locked on exit.
*/
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct list_head *tmp;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
expunge_all(msq, -EIDRM);
ss_wakeup(&msq->q_senders, 1);
msg_rmid(ns, msq);
msg_unlock(msq);
tmp = msq->q_messages.next;
while (tmp != &msq->q_messages) {
struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);
tmp = tmp->next;
atomic_dec(&ns->msg_hdrs);
free_msg(msg);
}
atomic_sub(msq->q_cbytes, &ns->msg_bytes);
security_msg_queue_free(msq);
ipc_rcu_putref(msq);
}
/*
* Called with msg_ids.rw_mutex and ipcp locked.
*/
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
return security_msg_queue_associate(msq, msgflg);
}
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
struct ipc_namespace *ns;
struct ipc_ops msg_ops;
struct ipc_params msg_params;
ns = current->nsproxy->ipc_ns;
msg_ops.getnew = newque;
msg_ops.associate = msg_security;
msg_ops.more_checks = NULL;
msg_params.key = key;
msg_params.flg = msgflg;
return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct msqid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
out.msg_stime = in->msg_stime;
out.msg_rtime = in->msg_rtime;
out.msg_ctime = in->msg_ctime;
if (in->msg_cbytes > USHRT_MAX)
out.msg_cbytes = USHRT_MAX;
else
out.msg_cbytes = in->msg_cbytes;
out.msg_lcbytes = in->msg_cbytes;
if (in->msg_qnum > USHRT_MAX)
out.msg_qnum = USHRT_MAX;
else
out.msg_qnum = in->msg_qnum;
if (in->msg_qbytes > USHRT_MAX)
out.msg_qbytes = USHRT_MAX;
else
out.msg_qbytes = in->msg_qbytes;
out.msg_lqbytes = in->msg_qbytes;
out.msg_lspid = in->msg_lspid;
out.msg_lrpid = in->msg_lrpid;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
switch(version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct msqid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->msg_perm.uid = tbuf_old.msg_perm.uid;
out->msg_perm.gid = tbuf_old.msg_perm.gid;
out->msg_perm.mode = tbuf_old.msg_perm.mode;
if (tbuf_old.msg_qbytes == 0)
out->msg_qbytes = tbuf_old.msg_lqbytes;
else
out->msg_qbytes = tbuf_old.msg_qbytes;
return 0;
}
default:
return -EINVAL;
}
}
/*
* This function handles some msgctl commands which require the rw_mutex
* to be held in write mode.
* NOTE: no locks must be held, the rw_mutex is taken inside this function.
*/
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
struct msqid_ds __user *buf, int version)
{
struct kern_ipc_perm *ipcp;
struct msqid64_ds uninitialized_var(msqid64);
struct msg_queue *msq;
int err;
if (cmd == IPC_SET) {
if (copy_msqid_from_user(&msqid64, buf, version))
return -EFAULT;
}
ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
&msqid64.msg_perm, msqid64.msg_qbytes);
if (IS_ERR(ipcp))
return PTR_ERR(ipcp);
msq = container_of(ipcp, struct msg_queue, q_perm);
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
switch (cmd) {
case IPC_RMID:
freeque(ns, ipcp);
goto out_up;
case IPC_SET:
if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
goto out_unlock;
}
err = ipc_update_perm(&msqid64.msg_perm, ipcp);
if (err)
goto out_unlock;
msq->q_qbytes = msqid64.msg_qbytes;
msq->q_ctime = get_seconds();
/* sleeping receivers might be excluded by
* stricter permissions.
*/
expunge_all(msq, -EAGAIN);
/* sleeping senders might be able to send
* due to a larger queue size.
*/
ss_wakeup(&msq->q_senders, 0);
break;
default:
err = -EINVAL;
}
out_unlock:
msg_unlock(msq);
out_up:
up_write(&msg_ids(ns).rw_mutex);
return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
struct msg_queue *msq;
int err, version;
struct ipc_namespace *ns;
if (msqid < 0 || cmd < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case MSG_INFO:
{
struct msginfo msginfo;
int max_id;
if (!buf)
return -EFAULT;
/*
* We must not return kernel stack data.
* due to padding, it's not enough
* to set all member fields.
*/
err = security_msg_queue_msgctl(NULL, cmd);
if (err)
return err;
memset(&msginfo, 0, sizeof(msginfo));
msginfo.msgmni = ns->msg_ctlmni;
msginfo.msgmax = ns->msg_ctlmax;
msginfo.msgmnb = ns->msg_ctlmnb;
msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG;
down_read(&msg_ids(ns).rw_mutex);
if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids(ns).in_use;
msginfo.msgmap = atomic_read(&ns->msg_hdrs);
msginfo.msgtql = atomic_read(&ns->msg_bytes);
} else {
msginfo.msgmap = MSGMAP;
msginfo.msgpool = MSGPOOL;
msginfo.msgtql = MSGTQL;
}
max_id = ipc_get_maxid(&msg_ids(ns));
up_read(&msg_ids(ns).rw_mutex);
if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
}
case MSG_STAT: /* msqid is an index rather than a msg queue id */
case IPC_STAT:
{
struct msqid64_ds tbuf;
int success_return;
if (!buf)
return -EFAULT;
if (cmd == MSG_STAT) {
msq = msg_lock(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = msq->q_perm.id;
} else {
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = 0;
}
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
tbuf.msg_stime = msq->q_stime;
tbuf.msg_rtime = msq->q_rtime;
tbuf.msg_ctime = msq->q_ctime;
tbuf.msg_cbytes = msq->q_cbytes;
tbuf.msg_qnum = msq->q_qnum;
tbuf.msg_qbytes = msq->q_qbytes;
tbuf.msg_lspid = msq->q_lspid;
tbuf.msg_lrpid = msq->q_lrpid;
msg_unlock(msq);
if (copy_msqid_to_user(buf, &tbuf, version))
return -EFAULT;
return success_return;
}
case IPC_SET:
case IPC_RMID:
err = msgctl_down(ns, msqid, cmd, buf, version);
return err;
default:
return -EINVAL;
}
out_unlock:
msg_unlock(msq);
return err;
}
static int testmsg(struct msg_msg *msg, long type, int mode)
{
switch(mode)
{
case SEARCH_ANY:
case SEARCH_NUMBER:
return 1;
case SEARCH_LESSEQUAL:
if (msg->m_type <=type)
return 1;
break;
case SEARCH_EQUAL:
if (msg->m_type == type)
return 1;
break;
case SEARCH_NOTEQUAL:
if (msg->m_type != type)
return 1;
break;
}
return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(-E2BIG);
} else {
msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds();
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
return 1;
}
}
}
return 0;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
size_t msgsz, int msgflg)
{
struct msg_queue *msq;
struct msg_msg *msg;
int err;
struct ipc_namespace *ns;
ns = current->nsproxy->ipc_ns;
if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
return -EINVAL;
if (mtype < 1)
return -EINVAL;
msg = load_msg(mtext, msgsz);
if (IS_ERR(msg))
return PTR_ERR(msg);
msg->m_type = mtype;
msg->m_ts = msgsz;
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq)) {
err = PTR_ERR(msq);
goto out_free;
}
for (;;) {
struct msg_sender s;
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IWUGO))
goto out_unlock_free;
err = security_msg_queue_msgsnd(msq, msg, msgflg);
if (err)
goto out_unlock_free;
if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
1 + msq->q_qnum <= msq->q_qbytes) {
break;
}
/* queue full, wait: */
if (msgflg & IPC_NOWAIT) {
err = -EAGAIN;
goto out_unlock_free;
}
ss_add(msq, &s);
ipc_rcu_getref(msq);
msg_unlock(msq);
schedule();
ipc_lock_by_ptr(&msq->q_perm);
ipc_rcu_putref(msq);
if (msq->q_perm.deleted) {
err = -EIDRM;
goto out_unlock_free;
}
ss_del(&s);
if (signal_pending(current)) {
err = -ERESTARTNOHAND;
goto out_unlock_free;
}
}
msq->q_lspid = task_tgid_vnr(current);
msq->q_stime = get_seconds();
if (!pipelined_send(msq, msg)) {
/* no one is waiting for this message, enqueue it */
list_add_tail(&msg->m_list, &msq->q_messages);
msq->q_cbytes += msgsz;
msq->q_qnum++;
atomic_add(msgsz, &ns->msg_bytes);
atomic_inc(&ns->msg_hdrs);
}
err = 0;
msg = NULL;
out_unlock_free:
msg_unlock(msq);
out_free:
if (msg != NULL)
free_msg(msg);
return err;
}
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
int, msgflg)
{
long mtype;
if (get_user(mtype, &msgp->mtype))
return -EFAULT;
return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
static inline int convert_mode(long *msgtyp, int msgflg)
{
if (msgflg & MSG_COPY)
return SEARCH_NUMBER;
/*
* find message of correct type.
* msgtyp = 0 => get first.
* msgtyp > 0 => get first message of matching type.
* msgtyp < 0 => get message with least type must be < abs(msgtype).
*/
if (*msgtyp == 0)
return SEARCH_ANY;
if (*msgtyp < 0) {
*msgtyp = -*msgtyp;
return SEARCH_LESSEQUAL;
}
if (msgflg & MSG_EXCEPT)
return SEARCH_NOTEQUAL;
return SEARCH_EQUAL;
}
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
struct msgbuf __user *msgp = dest;
size_t msgsz;
if (put_user(msg->m_type, &msgp->mtype))
return -EFAULT;
msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
if (store_msg(msgp->mtext, msg, msgsz))
return -EFAULT;
return msgsz;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
/*
* This function creates new kernel message structure, large enough to store
* bufsz message bytes.
*/
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
struct msg_msg *copy;
/*
* Create dummy message to copy real message to.
*/
copy = load_msg(buf, bufsz);
if (!IS_ERR(copy))
copy->m_ts = bufsz;
return copy;
}
static inline void free_copy(struct msg_msg *copy)
{
if (copy)
free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
return ERR_PTR(-ENOSYS);
}
static inline void free_copy(struct msg_msg *copy)
{
}
#endif
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
struct msg_msg *msg;
long count = 0;
list_for_each_entry(msg, &msq->q_messages, m_list) {
if (testmsg(msg, *msgtyp, mode) &&
!security_msg_queue_msgrcv(msq, msg, current,
*msgtyp, mode)) {
if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
*msgtyp = msg->m_type - 1;
} else if (mode == SEARCH_NUMBER) {
if (*msgtyp == count)
return msg;
} else
return msg;
count++;
}
}
return ERR_PTR(-EAGAIN);
}
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
int msgflg,
long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
struct msg_queue *msq;
struct msg_msg *msg;
int mode;
struct ipc_namespace *ns;
struct msg_msg *copy = NULL;
ns = current->nsproxy->ipc_ns;
if (msqid < 0 || (long) bufsz < 0)
return -EINVAL;
if (msgflg & MSG_COPY) {
copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
if (IS_ERR(copy))
return PTR_ERR(copy);
}
mode = convert_mode(&msgtyp, msgflg);
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq)) {
free_copy(copy);
return PTR_ERR(msq);
}
for (;;) {
struct msg_receiver msr_d;
msg = ERR_PTR(-EACCES);
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
msg = find_msg(msq, &msgtyp, mode);
if (!IS_ERR(msg)) {
/*
* Found a suitable message.
* Unlink it from the queue.
*/
if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
msg = ERR_PTR(-E2BIG);
goto out_unlock;
}
/*
* If we are copying, then do not unlink message and do
* not update queue parameters.
*/
if (msgflg & MSG_COPY) {
msg = copy_msg(msg, copy);
goto out_unlock;
}
list_del(&msg->m_list);
msq->q_qnum--;
msq->q_rtime = get_seconds();
msq->q_lrpid = task_tgid_vnr(current);
msq->q_cbytes -= msg->m_ts;
atomic_sub(msg->m_ts, &ns->msg_bytes);
atomic_dec(&ns->msg_hdrs);
ss_wakeup(&msq->q_senders, 0);
msg_unlock(msq);
break;
}
/* No message waiting. Wait for a message */
if (msgflg & IPC_NOWAIT) {
msg = ERR_PTR(-ENOMSG);
goto out_unlock;
}
list_add_tail(&msr_d.r_list, &msq->q_receivers);
msr_d.r_tsk = current;
msr_d.r_msgtype = msgtyp;
msr_d.r_mode = mode;
if (msgflg & MSG_NOERROR)
msr_d.r_maxsize = INT_MAX;
else
msr_d.r_maxsize = bufsz;
msr_d.r_msg = ERR_PTR(-EAGAIN);
current->state = TASK_INTERRUPTIBLE;
msg_unlock(msq);
schedule();
/* Lockless receive, part 1:
* Disable preemption. We don't hold a reference to the queue
* and getting a reference would defeat the idea of a lockless
* operation, thus the code relies on rcu to guarantee the
* existence of msq:
* Prior to destruction, expunge_all(-EIRDM) changes r_msg.
* Thus if r_msg is -EAGAIN, then the queue not yet destroyed.
* rcu_read_lock() prevents preemption between reading r_msg
* and the spin_lock() inside ipc_lock_by_ptr().
*/
rcu_read_lock();
/* Lockless receive, part 2:
* Wait until pipelined_send or expunge_all are outside of
* wake_up_process(). There is a race with exit(), see
* ipc/mqueue.c for the details.
*/
msg = (struct msg_msg*)msr_d.r_msg;
while (msg == NULL) {
cpu_relax();
msg = (struct msg_msg *)msr_d.r_msg;
}
/* Lockless receive, part 3:
* If there is a message or an error then accept it without
* locking.
*/
if (msg != ERR_PTR(-EAGAIN)) {
rcu_read_unlock();
break;
}
/* Lockless receive, part 3:
* Acquire the queue spinlock.
*/
ipc_lock_by_ptr(&msq->q_perm);
rcu_read_unlock();
/* Lockless receive, part 4:
* Repeat test after acquiring the spinlock.
*/
msg = (struct msg_msg*)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock;
list_del(&msr_d.r_list);
if (signal_pending(current)) {
msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
msg_unlock(msq);
break;
}
}
if (IS_ERR(msg)) {
free_copy(copy);
return PTR_ERR(msg);
}
bufsz = msg_handler(buf, msg, bufsz);
free_msg(msg);
return bufsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
long, msgtyp, int, msgflg)
{
return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct msg_queue *msq = it;
return seq_printf(s,
"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
msq->q_perm.key,
msq->q_perm.id,
msq->q_perm.mode,
msq->q_cbytes,
msq->q_qnum,
msq->q_lspid,
msq->q_lrpid,
from_kuid_munged(user_ns, msq->q_perm.uid),
from_kgid_munged(user_ns, msq->q_perm.gid),
from_kuid_munged(user_ns, msq->q_perm.cuid),
from_kgid_munged(user_ns, msq->q_perm.cgid),
msq->q_stime,
msq->q_rtime,
msq->q_ctime);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5752_0 |
crossvul-cpp_data_good_1624_0 | /*
* inode.c
*
* PURPOSE
* Inode handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998 Dave Boynton
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 10/04/98 dgb Added rudimentary directory functions
* 10/07/98 Fully working udf_block_map! It works!
* 11/25/98 bmap altered to better support extents
* 12/06/98 blf partition support in udf_iget, udf_block_map
* and udf_read_inode
* 12/12/98 rewrote udf_block_map to handle next extents and descs across
* block boundaries (which is not actually allowed)
* 12/20/98 added support for strategy 4096
* 03/07/99 rewrote udf_block_map (again)
* New funcs, inode_bmap, udf_next_aext
* 04/19/99 Support for writing device EA's for major/minor #
*/
#include "udfdecl.h"
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include "udf_i.h"
#include "udf_sb.h"
MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");
#define EXTENT_MERGE_SIZE 5
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
static void __udf_clear_extent_cache(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
if (iinfo->cached_extent.lstart != -1) {
brelse(iinfo->cached_extent.epos.bh);
iinfo->cached_extent.lstart = -1;
}
}
/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
spin_lock(&iinfo->i_extent_cache_lock);
__udf_clear_extent_cache(inode);
spin_unlock(&iinfo->i_extent_cache_lock);
}
/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
loff_t *lbcount, struct extent_position *pos)
{
struct udf_inode_info *iinfo = UDF_I(inode);
int ret = 0;
spin_lock(&iinfo->i_extent_cache_lock);
if ((iinfo->cached_extent.lstart <= bcount) &&
(iinfo->cached_extent.lstart != -1)) {
/* Cache hit */
*lbcount = iinfo->cached_extent.lstart;
memcpy(pos, &iinfo->cached_extent.epos,
sizeof(struct extent_position));
if (pos->bh)
get_bh(pos->bh);
ret = 1;
}
spin_unlock(&iinfo->i_extent_cache_lock);
return ret;
}
/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
struct extent_position *pos, int next_epos)
{
struct udf_inode_info *iinfo = UDF_I(inode);
spin_lock(&iinfo->i_extent_cache_lock);
/* Invalidate previously cached extent */
__udf_clear_extent_cache(inode);
if (pos->bh)
get_bh(pos->bh);
memcpy(&iinfo->cached_extent.epos, pos,
sizeof(struct extent_position));
iinfo->cached_extent.lstart = estart;
if (next_epos)
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
iinfo->cached_extent.epos.offset -=
sizeof(struct short_ad);
break;
case ICBTAG_FLAG_AD_LONG:
iinfo->cached_extent.epos.offset -=
sizeof(struct long_ad);
}
spin_unlock(&iinfo->i_extent_cache_lock);
}
void udf_evict_inode(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
int want_delete = 0;
if (!inode->i_nlink && !is_bad_inode(inode)) {
want_delete = 1;
udf_setsize(inode, 0);
udf_update_inode(inode, IS_SYNC(inode));
}
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode);
clear_inode(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
inode->i_size != iinfo->i_lenExtents) {
udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
inode->i_ino, inode->i_mode,
(unsigned long long)inode->i_size,
(unsigned long long)iinfo->i_lenExtents);
}
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
udf_clear_extent_cache(inode);
if (want_delete) {
udf_free_inode(inode);
}
}
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
struct udf_inode_info *iinfo = UDF_I(inode);
loff_t isize = inode->i_size;
if (to > isize) {
truncate_pagecache(inode, isize);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
}
}
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, udf_get_block, wbc);
}
static int udf_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, udf_get_block);
}
static int udf_readpage(struct file *file, struct page *page)
{
return mpage_readpage(page, udf_get_block);
}
static int udf_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
}
static int udf_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int ret;
ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
if (unlikely(ret))
udf_write_failed(mapping, pos + len);
return ret;
}
static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
udf_write_failed(mapping, offset + count);
return ret;
}
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, udf_get_block);
}
const struct address_space_operations udf_aops = {
.readpage = udf_readpage,
.readpages = udf_readpages,
.writepage = udf_writepage,
.writepages = udf_writepages,
.write_begin = udf_write_begin,
.write_end = generic_write_end,
.direct_IO = udf_direct_IO,
.bmap = udf_bmap,
};
/*
* Expand file stored in ICB to a normal one-block-file
*
* This function requires i_data_sem for writing and releases it.
* This function requires i_mutex held
*/
int udf_expand_file_adinicb(struct inode *inode)
{
struct page *page;
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
int err;
struct writeback_control udf_wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
};
WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
up_write(&iinfo->i_data_sem);
mark_inode_dirty(inode);
return 0;
}
/*
* Release i_data_sem so that we can lock a page - page lock ranks
* above i_data_sem. i_mutex still protects us against file changes.
*/
up_write(&iinfo->i_data_sem);
page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
if (!page)
return -ENOMEM;
if (!PageUptodate(page)) {
kaddr = kmap(page);
memset(kaddr + iinfo->i_lenAlloc, 0x00,
PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
iinfo->i_lenAlloc);
flush_dcache_page(page);
SetPageUptodate(page);
kunmap(page);
}
down_write(&iinfo->i_data_sem);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
iinfo->i_lenAlloc);
iinfo->i_lenAlloc = 0;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
up_write(&iinfo->i_data_sem);
err = inode->i_data.a_ops->writepage(page, &udf_wbc);
if (err) {
/* Restore everything back so that we don't lose data... */
lock_page(page);
kaddr = kmap(page);
down_write(&iinfo->i_data_sem);
memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
inode->i_size);
kunmap(page);
unlock_page(page);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
inode->i_data.a_ops = &udf_adinicb_aops;
up_write(&iinfo->i_data_sem);
}
page_cache_release(page);
mark_inode_dirty(inode);
return err;
}
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
int *err)
{
int newblock;
struct buffer_head *dbh = NULL;
struct kernel_lb_addr eloc;
uint8_t alloctype;
struct extent_position epos;
struct udf_fileident_bh sfibh, dfibh;
loff_t f_pos = udf_ext0_offset(inode);
int size = udf_ext0_offset(inode) + inode->i_size;
struct fileIdentDesc cfi, *sfi, *dfi;
struct udf_inode_info *iinfo = UDF_I(inode);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
alloctype = ICBTAG_FLAG_AD_SHORT;
else
alloctype = ICBTAG_FLAG_AD_LONG;
if (!inode->i_size) {
iinfo->i_alloc_type = alloctype;
mark_inode_dirty(inode);
return NULL;
}
/* alloc block, and copy data to it */
*block = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,
iinfo->i_location.logicalBlockNum, err);
if (!(*block))
return NULL;
newblock = udf_get_pblock(inode->i_sb, *block,
iinfo->i_location.partitionReferenceNum,
0);
if (!newblock)
return NULL;
dbh = udf_tgetblk(inode->i_sb, newblock);
if (!dbh)
return NULL;
lock_buffer(dbh);
memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(dbh);
unlock_buffer(dbh);
mark_buffer_dirty_inode(dbh, inode);
sfibh.soffset = sfibh.eoffset =
f_pos & (inode->i_sb->s_blocksize - 1);
sfibh.sbh = sfibh.ebh = NULL;
dfibh.soffset = dfibh.eoffset = 0;
dfibh.sbh = dfibh.ebh = dbh;
while (f_pos < size) {
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
NULL, NULL, NULL);
if (!sfi) {
brelse(dbh);
return NULL;
}
iinfo->i_alloc_type = alloctype;
sfi->descTag.tagLocation = cpu_to_le32(*block);
dfibh.soffset = dfibh.eoffset;
dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
sfi->fileIdent +
le16_to_cpu(sfi->lengthOfImpUse))) {
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
brelse(dbh);
return NULL;
}
}
mark_buffer_dirty_inode(dbh, inode);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
iinfo->i_lenAlloc);
iinfo->i_lenAlloc = 0;
eloc.logicalBlockNum = *block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
iinfo->i_lenExtents = inode->i_size;
epos.bh = NULL;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
/* UniqueID stuff */
brelse(epos.bh);
mark_inode_dirty(inode);
return dbh;
}
static int udf_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
int err, new;
sector_t phys = 0;
struct udf_inode_info *iinfo;
if (!create) {
phys = udf_block_map(inode, block);
if (phys)
map_bh(bh_result, inode->i_sb, phys);
return 0;
}
err = -EIO;
new = 0;
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
if (block == iinfo->i_next_alloc_block + 1) {
iinfo->i_next_alloc_block++;
iinfo->i_next_alloc_goal++;
}
udf_clear_extent_cache(inode);
phys = inode_getblk(inode, block, &err, &new);
if (!phys)
goto abort;
if (new)
set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, phys);
abort:
up_write(&iinfo->i_data_sem);
return err;
}
static struct buffer_head *udf_getblk(struct inode *inode, long block,
int create, int *err)
{
struct buffer_head *bh;
struct buffer_head dummy;
dummy.b_state = 0;
dummy.b_blocknr = -1000;
*err = udf_get_block(inode, block, &dummy, create);
if (!*err && buffer_mapped(&dummy)) {
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (buffer_new(&dummy)) {
lock_buffer(bh);
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
unlock_buffer(bh);
mark_buffer_dirty_inode(bh, inode);
}
return bh;
}
return NULL;
}
/* Extend the file by 'blocks' blocks, return the number of extents added */
static int udf_do_extend_file(struct inode *inode,
struct extent_position *last_pos,
struct kernel_long_ad *last_ext,
sector_t blocks)
{
sector_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
struct kernel_lb_addr prealloc_loc = {};
int prealloc_len = 0;
struct udf_inode_info *iinfo;
int err;
/* The previous extent is fake and we should not extend by anything
* - there's nothing to do... */
if (!blocks && fake)
return 0;
iinfo = UDF_I(inode);
/* Round the last extent up to a multiple of block size */
if (last_ext->extLength & (sb->s_blocksize - 1)) {
last_ext->extLength =
(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
iinfo->i_lenExtents =
(iinfo->i_lenExtents + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
}
/* Last extent are just preallocated blocks? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
EXT_NOT_RECORDED_ALLOCATED) {
/* Save the extent so that we can reattach it to the end */
prealloc_loc = last_ext->extLocation;
prealloc_len = last_ext->extLength;
/* Mark the extent as a hole */
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
last_ext->extLocation.logicalBlockNum = 0;
last_ext->extLocation.partitionReferenceNum = 0;
}
/* Can we merge with the previous extent? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
EXT_NOT_RECORDED_NOT_ALLOCATED) {
add = ((1 << 30) - sb->s_blocksize -
(last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
sb->s_blocksize_bits;
if (add > blocks)
add = blocks;
blocks -= add;
last_ext->extLength += add << sb->s_blocksize_bits;
}
if (fake) {
udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
count++;
} else
udf_write_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
/* Managed to do everything necessary? */
if (!blocks)
goto out;
/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
last_ext->extLocation.logicalBlockNum = 0;
last_ext->extLocation.partitionReferenceNum = 0;
add = (1 << (30-sb->s_blocksize_bits)) - 1;
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(add << sb->s_blocksize_bits);
/* Create enough extents to cover the whole hole */
while (blocks > add) {
blocks -= add;
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
return err;
count++;
}
if (blocks) {
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(blocks << sb->s_blocksize_bits);
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
return err;
count++;
}
out:
/* Do we have some preallocated blocks saved? */
if (prealloc_len) {
err = udf_add_aext(inode, last_pos, &prealloc_loc,
prealloc_len, 1);
if (err)
return err;
last_ext->extLocation = prealloc_loc;
last_ext->extLength = prealloc_len;
count++;
}
/* last_pos should point to the last written extent... */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
last_pos->offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
last_pos->offset -= sizeof(struct long_ad);
else
return -EIO;
return count;
}
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
struct extent_position epos;
struct kernel_lb_addr eloc;
uint32_t elen;
int8_t etype;
struct super_block *sb = inode->i_sb;
sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
struct kernel_long_ad extent;
int err;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
BUG();
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
/* File has extent covering the new size (could happen when extending
* inside a block)? */
if (etype != -1)
return 0;
if (newsize & (sb->s_blocksize - 1))
offset++;
/* Extended file just to the boundary of the last file block? */
if (offset == 0)
return 0;
/* Truncate is extending the file by 'offset' blocks */
if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
(epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
/* File has no extents at all or has empty last
* indirect extent! Create a fake extent... */
extent.extLocation.logicalBlockNum = 0;
extent.extLocation.partitionReferenceNum = 0;
extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
} else {
epos.offset -= adsize;
etype = udf_next_aext(inode, &epos, &extent.extLocation,
&extent.extLength, 0);
extent.extLength |= etype << 30;
}
err = udf_do_extend_file(inode, &epos, &extent, offset);
if (err < 0)
goto out;
err = 0;
iinfo->i_lenExtents = newsize;
out:
brelse(epos.bh);
return err;
}
static sector_t inode_getblk(struct inode *inode, sector_t block,
int *err, int *new)
{
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
struct extent_position prev_epos, cur_epos, next_epos;
int count = 0, startnum = 0, endnum = 0;
uint32_t elen = 0, tmpelen;
struct kernel_lb_addr eloc, tmpeloc;
int c = 1;
loff_t lbcount = 0, b_off = 0;
uint32_t newblocknum, newblock;
sector_t offset = 0;
int8_t etype;
struct udf_inode_info *iinfo = UDF_I(inode);
int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
int lastblock = 0;
bool isBeyondEOF;
*err = 0;
*new = 0;
prev_epos.offset = udf_file_entry_alloc_offset(inode);
prev_epos.block = iinfo->i_location;
prev_epos.bh = NULL;
cur_epos = next_epos = prev_epos;
b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
/* find the extent which contains the block we are looking for.
alternate between laarr[0] and laarr[1] for locations of the
current extent, and the previous extent */
do {
if (prev_epos.bh != cur_epos.bh) {
brelse(prev_epos.bh);
get_bh(cur_epos.bh);
prev_epos.bh = cur_epos.bh;
}
if (cur_epos.bh != next_epos.bh) {
brelse(cur_epos.bh);
get_bh(next_epos.bh);
cur_epos.bh = next_epos.bh;
}
lbcount += elen;
prev_epos.block = cur_epos.block;
cur_epos.block = next_epos.block;
prev_epos.offset = cur_epos.offset;
cur_epos.offset = next_epos.offset;
etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
if (etype == -1)
break;
c = !c;
laarr[c].extLength = (etype << 30) | elen;
laarr[c].extLocation = eloc;
if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
pgoal = eloc.logicalBlockNum +
((elen + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
count++;
} while (lbcount + elen <= b_off);
b_off -= lbcount;
offset = b_off >> inode->i_sb->s_blocksize_bits;
/*
* Move prev_epos and cur_epos into indirect extent if we are at
* the pointer to it
*/
udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
/* if the extent is allocated and recorded, return the block
if the extent is not a multiple of the blocksize, round up */
if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
if (elen & (inode->i_sb->s_blocksize - 1)) {
elen = EXT_RECORDED_ALLOCATED |
((elen + inode->i_sb->s_blocksize - 1) &
~(inode->i_sb->s_blocksize - 1));
udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
}
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
return newblock;
}
/* Are we beyond EOF? */
if (etype == -1) {
int ret;
isBeyondEOF = 1;
if (count) {
if (c)
laarr[0] = laarr[1];
startnum = 1;
} else {
/* Create a fake extent when there's not one */
memset(&laarr[0].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
/* Will udf_do_extend_file() create real extent from
a fake one? */
startnum = (offset > 0);
}
/* Create extents for the hole between EOF and offset */
ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
if (ret < 0) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
*err = ret;
return 0;
}
c = 0;
offset = 0;
count += ret;
/* We are not covered by a preallocated extent? */
if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
EXT_NOT_RECORDED_ALLOCATED) {
/* Is there any real extent? - otherwise we overwrite
* the fake one... */
if (count)
c = !c;
laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
inode->i_sb->s_blocksize;
memset(&laarr[c].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
count++;
}
endnum = c + 1;
lastblock = 1;
} else {
isBeyondEOF = 0;
endnum = startnum = ((count > 2) ? 2 : count);
/* if the current extent is in position 0,
swap it with the previous */
if (!c && count != 1) {
laarr[2] = laarr[0];
laarr[0] = laarr[1];
laarr[1] = laarr[2];
c = 1;
}
/* if the current block is located in an extent,
read the next extent */
etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
if (etype != -1) {
laarr[c + 1].extLength = (etype << 30) | elen;
laarr[c + 1].extLocation = eloc;
count++;
startnum++;
endnum++;
} else
lastblock = 1;
}
/* if the current extent is not recorded but allocated, get the
* block in the extent corresponding to the requested block */
if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
else { /* otherwise, allocate a new block */
if (iinfo->i_next_alloc_block == block)
goal = iinfo->i_next_alloc_goal;
if (!goal) {
if (!(goal = pgoal)) /* XXX: what was intended here? */
goal = iinfo->i_location.logicalBlockNum + 1;
}
newblocknum = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,
goal, err);
if (!newblocknum) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
*err = -ENOSPC;
return 0;
}
if (isBeyondEOF)
iinfo->i_lenExtents += inode->i_sb->s_blocksize;
}
/* if the extent the requsted block is located in contains multiple
* blocks, split the extent into at most three extents. blocks prior
* to requested block, requested block, and blocks after requested
* block */
udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
#ifdef UDF_PREALLOCATE
/* We preallocate blocks only for regular files. It also makes sense
* for directories but there's a problem when to drop the
* preallocation. We might use some delayed work for that but I feel
* it's overengineering for a filesystem like UDF. */
if (S_ISREG(inode->i_mode))
udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif
/* merge any continuous blocks in laarr */
udf_merge_extents(inode, laarr, &endnum);
/* write back the new extents, inserting new extents if the new number
* of extents is greater than the old number, and deleting extents if
* the new number of extents is less than the old number */
udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
newblock = udf_get_pblock(inode->i_sb, newblocknum,
iinfo->i_location.partitionReferenceNum, 0);
if (!newblock) {
*err = -EIO;
return 0;
}
*new = 1;
iinfo->i_next_alloc_block = block;
iinfo->i_next_alloc_goal = newblocknum;
inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
return newblock;
}
static void udf_split_extents(struct inode *inode, int *c, int offset,
int newblocknum,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
unsigned long blocksize = inode->i_sb->s_blocksize;
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
(laarr[*c].extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
int curr = *c;
int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits;
int8_t etype = (laarr[curr].extLength >> 30);
if (blen == 1)
;
else if (!offset || blen == offset + 1) {
laarr[curr + 2] = laarr[curr + 1];
laarr[curr + 1] = laarr[curr];
} else {
laarr[curr + 3] = laarr[curr + 1];
laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
}
if (offset) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode,
&laarr[curr].extLocation,
0, offset);
laarr[curr].extLength =
EXT_NOT_RECORDED_NOT_ALLOCATED |
(offset << blocksize_bits);
laarr[curr].extLocation.logicalBlockNum = 0;
laarr[curr].extLocation.
partitionReferenceNum = 0;
} else
laarr[curr].extLength = (etype << 30) |
(offset << blocksize_bits);
curr++;
(*c)++;
(*endnum)++;
}
laarr[curr].extLocation.logicalBlockNum = newblocknum;
if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
laarr[curr].extLocation.partitionReferenceNum =
UDF_I(inode)->i_location.partitionReferenceNum;
laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
blocksize;
curr++;
if (blen != offset + 1) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
laarr[curr].extLocation.logicalBlockNum +=
offset + 1;
laarr[curr].extLength = (etype << 30) |
((blen - (offset + 1)) << blocksize_bits);
curr++;
(*endnum)++;
}
}
}
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
int start, length = 0, currlength = 0, i;
if (*endnum >= (c + 1)) {
if (!lastblock)
return;
else
start = c;
} else {
if ((laarr[c + 1].extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
start = c + 1;
length = currlength =
(((laarr[c + 1].extLength &
UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
} else
start = c;
}
for (i = start + 1; i <= *endnum; i++) {
if (i == *endnum) {
if (lastblock)
length += UDF_DEFAULT_PREALLOC_BLOCKS;
} else if ((laarr[i].extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
length += (((laarr[i].extLength &
UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
} else
break;
}
if (length) {
int next = laarr[start].extLocation.logicalBlockNum +
(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
laarr[start].extLocation.partitionReferenceNum,
next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
length : UDF_DEFAULT_PREALLOC_BLOCKS) -
currlength);
if (numalloc) {
if (start == (c + 1))
laarr[start].extLength +=
(numalloc <<
inode->i_sb->s_blocksize_bits);
else {
memmove(&laarr[c + 2], &laarr[c + 1],
sizeof(struct long_ad) * (*endnum - (c + 1)));
(*endnum)++;
laarr[c + 1].extLocation.logicalBlockNum = next;
laarr[c + 1].extLocation.partitionReferenceNum =
laarr[c].extLocation.
partitionReferenceNum;
laarr[c + 1].extLength =
EXT_NOT_RECORDED_ALLOCATED |
(numalloc <<
inode->i_sb->s_blocksize_bits);
start = c + 1;
}
for (i = start + 1; numalloc && i < *endnum; i++) {
int elen = ((laarr[i].extLength &
UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (elen > numalloc) {
laarr[i].extLength -=
(numalloc <<
inode->i_sb->s_blocksize_bits);
numalloc = 0;
} else {
numalloc -= elen;
if (*endnum > (i + 1))
memmove(&laarr[i],
&laarr[i + 1],
sizeof(struct long_ad) *
(*endnum - (i + 1)));
i--;
(*endnum)--;
}
}
UDF_I(inode)->i_lenExtents +=
numalloc << inode->i_sb->s_blocksize_bits;
}
}
}
static void udf_merge_extents(struct inode *inode,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
int i;
unsigned long blocksize = inode->i_sb->s_blocksize;
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
for (i = 0; i < (*endnum - 1); i++) {
struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
(((li->extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
((lip1->extLocation.logicalBlockNum -
li->extLocation.logicalBlockNum) ==
(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits)))) {
if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
lip1->extLength = (lip1->extLength -
(li->extLength &
UDF_EXTENT_LENGTH_MASK) +
UDF_EXTENT_LENGTH_MASK) &
~(blocksize - 1);
li->extLength = (li->extLength &
UDF_EXTENT_FLAG_MASK) +
(UDF_EXTENT_LENGTH_MASK + 1) -
blocksize;
lip1->extLocation.logicalBlockNum =
li->extLocation.logicalBlockNum +
((li->extLength &
UDF_EXTENT_LENGTH_MASK) >>
blocksize_bits);
} else {
li->extLength = lip1->extLength +
(((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~(blocksize - 1));
if (*endnum > (i + 2))
memmove(&laarr[i + 1], &laarr[i + 2],
sizeof(struct long_ad) *
(*endnum - (i + 2)));
i--;
(*endnum)--;
}
} else if (((li->extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
((lip1->extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits);
li->extLocation.logicalBlockNum = 0;
li->extLocation.partitionReferenceNum = 0;
if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
lip1->extLength = (lip1->extLength -
(li->extLength &
UDF_EXTENT_LENGTH_MASK) +
UDF_EXTENT_LENGTH_MASK) &
~(blocksize - 1);
li->extLength = (li->extLength &
UDF_EXTENT_FLAG_MASK) +
(UDF_EXTENT_LENGTH_MASK + 1) -
blocksize;
} else {
li->extLength = lip1->extLength +
(((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~(blocksize - 1));
if (*endnum > (i + 2))
memmove(&laarr[i + 1], &laarr[i + 2],
sizeof(struct long_ad) *
(*endnum - (i + 2)));
i--;
(*endnum)--;
}
} else if ((li->extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode,
&li->extLocation, 0,
((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits);
li->extLocation.logicalBlockNum = 0;
li->extLocation.partitionReferenceNum = 0;
li->extLength = (li->extLength &
UDF_EXTENT_LENGTH_MASK) |
EXT_NOT_RECORDED_NOT_ALLOCATED;
}
}
}
static void udf_update_extents(struct inode *inode,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int startnum, int endnum,
struct extent_position *epos)
{
int start = 0, i;
struct kernel_lb_addr tmploc;
uint32_t tmplen;
if (startnum > endnum) {
for (i = 0; i < (startnum - endnum); i++)
udf_delete_aext(inode, *epos, laarr[i].extLocation,
laarr[i].extLength);
} else if (startnum < endnum) {
for (i = 0; i < (endnum - startnum); i++) {
udf_insert_aext(inode, *epos, laarr[i].extLocation,
laarr[i].extLength);
udf_next_aext(inode, epos, &laarr[i].extLocation,
&laarr[i].extLength, 1);
start++;
}
}
for (i = start; i < endnum; i++) {
udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
udf_write_aext(inode, epos, &laarr[i].extLocation,
laarr[i].extLength, 1);
}
}
struct buffer_head *udf_bread(struct inode *inode, int block,
int create, int *err)
{
struct buffer_head *bh = NULL;
bh = udf_getblk(inode, block, create, err);
if (!bh)
return NULL;
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
brelse(bh);
*err = -EIO;
return NULL;
}
int udf_setsize(struct inode *inode, loff_t newsize)
{
int err;
struct udf_inode_info *iinfo;
int bsize = 1 << inode->i_blkbits;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return -EINVAL;
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
iinfo = UDF_I(inode);
if (newsize > inode->i_size) {
down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
if (bsize <
(udf_file_entry_alloc_offset(inode) + newsize)) {
err = udf_expand_file_adinicb(inode);
if (err)
return err;
down_write(&iinfo->i_data_sem);
} else {
iinfo->i_lenAlloc = newsize;
goto set_size;
}
}
err = udf_extend_file(inode, newsize);
if (err) {
up_write(&iinfo->i_data_sem);
return err;
}
set_size:
truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
0x00, bsize - newsize -
udf_file_entry_alloc_offset(inode));
iinfo->i_lenAlloc = newsize;
truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
goto update_time;
}
err = block_truncate_page(inode->i_mapping, newsize,
udf_get_block);
if (err)
return err;
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
update_time:
inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
return 0;
}
/*
* Maximum length of linked list formed by ICB hierarchy. The chosen number is
* arbitrary - just that we hopefully don't limit any real use of rewritten
* inode on write-once media but avoid looping for too long on corrupted media.
*/
#define UDF_MAX_ICB_NESTING 1024
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
reread:
if (iloc->logicalBlockNum >=
sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
udf_debug("block=%d, partition=%d out of range\n",
iloc->logicalBlockNum, iloc->partitionReferenceNum);
return -EIO;
}
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
goto out;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength) {
brelse(ibh);
memcpy(&iinfo->i_location, &loc,
sizeof(struct kernel_lb_addr));
if (++indirections > UDF_MAX_ICB_NESTING) {
udf_err(inode->i_sb,
"too many ICBs in ICB hierarchy"
" (max %d supported)\n",
UDF_MAX_ICB_NESTING);
goto out;
}
brelse(bh);
goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
goto out;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
bs - sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
bs - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
ret = udf_alloc_i_data(inode, bs -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
bs - sizeof(struct unallocSpaceEntry));
return 0;
}
ret = -EIO;
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
i_gid_write(inode, le32_to_cpu(fe->gid));
if (!gid_valid(inode->i_gid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count) {
if (!hidden_inode) {
ret = -ESTALE;
goto out;
}
link_count = 1;
}
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
inode->i_generation = iinfo->i_unique;
/*
* Sanity check length of allocation descriptors and extended attrs to
* avoid integer overflows
*/
if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
goto out;
/* Now do exact checks */
if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
goto out;
/* Sanity checks for files in ICB so that we don't get confused later */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/*
* For file in ICB data is stored in allocation descriptor
* so sizes should match
*/
if (iinfo->i_lenAlloc != inode->i_size)
goto out;
/* File in ICB has to fit in there... */
if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
goto out;
}
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
goto out;
}
ret = 0;
out:
brelse(bh);
return ret;
}
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
struct udf_inode_info *iinfo = UDF_I(inode);
iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
if (!iinfo->i_ext.i_data) {
udf_err(inode->i_sb, "(ino %ld) no free memory\n",
inode->i_ino);
return -ENOMEM;
}
return 0;
}
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
umode_t mode;
uint32_t permissions;
uint32_t flags;
permissions = le32_to_cpu(fe->permissions);
flags = le16_to_cpu(fe->icbTag.flags);
mode = ((permissions) & S_IRWXO) |
((permissions >> 2) & S_IRWXG) |
((permissions >> 4) & S_IRWXU) |
((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
return mode;
}
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
static int udf_sync_inode(struct inode *inode)
{
return udf_update_inode(inode, 1);
}
static int udf_update_inode(struct inode *inode, int do_sync)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint64_t lb_recorded;
uint32_t udfperms;
uint16_t icbflags;
uint16_t crclen;
int err = 0;
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
struct udf_inode_info *iinfo = UDF_I(inode);
bh = udf_tgetblk(inode->i_sb,
udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
if (!bh) {
udf_debug("getblk failure\n");
return -ENOMEM;
}
lock_buffer(bh);
memset(bh->b_data, 0, inode->i_sb->s_blocksize);
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (iinfo->i_use) {
struct unallocSpaceEntry *use =
(struct unallocSpaceEntry *)bh->b_data;
use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
use->descTag.tagLocation =
cpu_to_le32(iinfo->i_location.logicalBlockNum);
crclen = sizeof(struct unallocSpaceEntry) +
iinfo->i_lenAlloc - sizeof(struct tag);
use->descTag.descCRCLength = cpu_to_le16(crclen);
use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
sizeof(struct tag),
crclen));
use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
goto out;
}
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
fe->uid = cpu_to_le32(-1);
else
fe->uid = cpu_to_le32(i_uid_read(inode));
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
fe->gid = cpu_to_le32(-1);
else
fe->gid = cpu_to_le32(i_gid_read(inode));
udfperms = ((inode->i_mode & S_IRWXO)) |
((inode->i_mode & S_IRWXG) << 2) |
((inode->i_mode & S_IRWXU) << 4);
udfperms |= (le32_to_cpu(fe->permissions) &
(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
fe->permissions = cpu_to_le32(udfperms);
if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
else
fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
fe->informationLength = cpu_to_le64(inode->i_size);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct regid *eid;
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (!dsea) {
dsea = (struct deviceSpec *)
udf_add_extendedattr(inode,
sizeof(struct deviceSpec) +
sizeof(struct regid), 12, 0x3);
dsea->attrType = cpu_to_le32(12);
dsea->attrSubtype = 1;
dsea->attrLength = cpu_to_le32(
sizeof(struct deviceSpec) +
sizeof(struct regid));
dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
}
eid = (struct regid *)dsea->impUse;
memset(eid, 0, sizeof(struct regid));
strcpy(eid->ident, UDF_ID_DEVELOPER);
eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
eid->identSuffix[1] = UDF_OS_ID_LINUX;
dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
}
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
lb_recorded = 0; /* No extents => no blocks! */
else
lb_recorded =
(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
(blocksize_bits - 9);
if (iinfo->i_efe == 0) {
memcpy(bh->b_data + sizeof(struct fileEntry),
iinfo->i_ext.i_data,
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
fe->uniqueID = cpu_to_le64(iinfo->i_unique);
fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
crclen = sizeof(struct fileEntry);
} else {
memcpy(bh->b_data + sizeof(struct extendedFileEntry),
iinfo->i_ext.i_data,
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
efe->objectSize = cpu_to_le64(inode->i_size);
efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
iinfo->i_crtime = inode->i_atime;
if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
iinfo->i_crtime = inode->i_mtime;
if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
iinfo->i_crtime = inode->i_ctime;
udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
memset(&(efe->impIdent), 0, sizeof(struct regid));
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
efe->uniqueID = cpu_to_le64(iinfo->i_unique);
efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
crclen = sizeof(struct extendedFileEntry);
}
if (iinfo->i_strat4096) {
fe->icbTag.strategyType = cpu_to_le16(4096);
fe->icbTag.strategyParameter = cpu_to_le16(1);
fe->icbTag.numEntries = cpu_to_le16(2);
} else {
fe->icbTag.strategyType = cpu_to_le16(4);
fe->icbTag.numEntries = cpu_to_le16(1);
}
if (S_ISDIR(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
else if (S_ISREG(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
else if (S_ISLNK(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
else if (S_ISBLK(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
else if (S_ISCHR(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
else if (S_ISFIFO(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
else if (S_ISSOCK(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
icbflags = iinfo->i_alloc_type |
((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
(le16_to_cpu(fe->icbTag.flags) &
~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
fe->icbTag.flags = cpu_to_le16(icbflags);
if (sbi->s_udfrev >= 0x0200)
fe->descTag.descVersion = cpu_to_le16(3);
else
fe->descTag.descVersion = cpu_to_le16(2);
fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
fe->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.logicalBlockNum);
crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
fe->descTag.descCRCLength = cpu_to_le16(crclen);
fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
crclen));
fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
out:
set_buffer_uptodate(bh);
unlock_buffer(bh);
/* write the data blocks */
mark_buffer_dirty(bh);
if (do_sync) {
sync_dirty_buffer(bh);
if (buffer_write_io_error(bh)) {
udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
inode->i_ino);
err = -EIO;
}
}
brelse(bh);
return err;
}
struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
bool hidden_inode)
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
int err;
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
err = udf_read_inode(inode, hidden_inode);
if (err < 0) {
iget_failed(inode);
return ERR_PTR(err);
}
unlock_new_inode(inode);
return inode;
}
int udf_add_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
struct short_ad *sad = NULL;
struct long_ad *lad = NULL;
struct allocExtDesc *aed;
uint8_t *ptr;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh)
ptr = iinfo->i_ext.i_data + epos->offset -
udf_file_entry_alloc_offset(inode) +
iinfo->i_lenEAttr;
else
ptr = epos->bh->b_data + epos->offset;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
return -EIO;
if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
unsigned char *sptr, *dptr;
struct buffer_head *nbh;
int err, loffset;
struct kernel_lb_addr obloc = epos->block;
epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
obloc.partitionReferenceNum,
obloc.logicalBlockNum, &err);
if (!epos->block.logicalBlockNum)
return -ENOSPC;
nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
&epos->block,
0));
if (!nbh)
return -EIO;
lock_buffer(nbh);
memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(nbh);
unlock_buffer(nbh);
mark_buffer_dirty_inode(nbh, inode);
aed = (struct allocExtDesc *)(nbh->b_data);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
aed->previousAllocExtLocation =
cpu_to_le32(obloc.logicalBlockNum);
if (epos->offset + adsize > inode->i_sb->s_blocksize) {
loffset = epos->offset;
aed->lengthAllocDescs = cpu_to_le32(adsize);
sptr = ptr - adsize;
dptr = nbh->b_data + sizeof(struct allocExtDesc);
memcpy(dptr, sptr, adsize);
epos->offset = sizeof(struct allocExtDesc) + adsize;
} else {
loffset = epos->offset + adsize;
aed->lengthAllocDescs = cpu_to_le32(0);
sptr = ptr;
epos->offset = sizeof(struct allocExtDesc);
if (epos->bh) {
aed = (struct allocExtDesc *)epos->bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, adsize);
} else {
iinfo->i_lenAlloc += adsize;
mark_inode_dirty(inode);
}
}
if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
epos->block.logicalBlockNum, sizeof(struct tag));
else
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
epos->block.logicalBlockNum, sizeof(struct tag));
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
sad = (struct short_ad *)sptr;
sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
inode->i_sb->s_blocksize);
sad->extPosition =
cpu_to_le32(epos->block.logicalBlockNum);
break;
case ICBTAG_FLAG_AD_LONG:
lad = (struct long_ad *)sptr;
lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
inode->i_sb->s_blocksize);
lad->extLocation = cpu_to_lelb(epos->block);
memset(lad->impUse, 0x00, sizeof(lad->impUse));
break;
}
if (epos->bh) {
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(epos->bh->b_data, loffset);
else
udf_update_tag(epos->bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos->bh, inode);
brelse(epos->bh);
} else {
mark_inode_dirty(inode);
}
epos->bh = nbh;
}
udf_write_aext(inode, epos, eloc, elen, inc);
if (!epos->bh) {
iinfo->i_lenAlloc += adsize;
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)epos->bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, adsize);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(epos->bh->b_data,
epos->offset + (inc ? 0 : adsize));
else
udf_update_tag(epos->bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos->bh, inode);
}
return 0;
}
void udf_write_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
uint8_t *ptr;
struct short_ad *sad;
struct long_ad *lad;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh)
ptr = iinfo->i_ext.i_data + epos->offset -
udf_file_entry_alloc_offset(inode) +
iinfo->i_lenEAttr;
else
ptr = epos->bh->b_data + epos->offset;
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
sad = (struct short_ad *)ptr;
sad->extLength = cpu_to_le32(elen);
sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
adsize = sizeof(struct short_ad);
break;
case ICBTAG_FLAG_AD_LONG:
lad = (struct long_ad *)ptr;
lad->extLength = cpu_to_le32(elen);
lad->extLocation = cpu_to_lelb(*eloc);
memset(lad->impUse, 0x00, sizeof(lad->impUse));
adsize = sizeof(struct long_ad);
break;
default:
return;
}
if (epos->bh) {
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
struct allocExtDesc *aed =
(struct allocExtDesc *)epos->bh->b_data;
udf_update_tag(epos->bh->b_data,
le32_to_cpu(aed->lengthAllocDescs) +
sizeof(struct allocExtDesc));
}
mark_buffer_dirty_inode(epos->bh, inode);
} else {
mark_inode_dirty(inode);
}
if (inc)
epos->offset += adsize;
}
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int8_t etype;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
int block;
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
epos->bh = udf_tread(inode->i_sb, block);
if (!epos->bh) {
udf_debug("reading block %d failed!\n", block);
return -1;
}
}
return etype;
}
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int alen;
int8_t etype;
uint8_t *ptr;
struct short_ad *sad;
struct long_ad *lad;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh) {
if (!epos->offset)
epos->offset = udf_file_entry_alloc_offset(inode);
ptr = iinfo->i_ext.i_data + epos->offset -
udf_file_entry_alloc_offset(inode) +
iinfo->i_lenEAttr;
alen = udf_file_entry_alloc_offset(inode) +
iinfo->i_lenAlloc;
} else {
if (!epos->offset)
epos->offset = sizeof(struct allocExtDesc);
ptr = epos->bh->b_data + epos->offset;
alen = sizeof(struct allocExtDesc) +
le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
lengthAllocDescs);
}
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
if (!sad)
return -1;
etype = le32_to_cpu(sad->extLength) >> 30;
eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
eloc->partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
break;
case ICBTAG_FLAG_AD_LONG:
lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
if (!lad)
return -1;
etype = le32_to_cpu(lad->extLength) >> 30;
*eloc = lelb_to_cpu(lad->extLocation);
*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
break;
default:
udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type);
return -1;
}
return etype;
}
static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
struct kernel_lb_addr neloc, uint32_t nelen)
{
struct kernel_lb_addr oeloc;
uint32_t oelen;
int8_t etype;
if (epos.bh)
get_bh(epos.bh);
while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
udf_write_aext(inode, &epos, &neloc, nelen, 1);
neloc = oeloc;
nelen = (etype << 30) | oelen;
}
udf_add_aext(inode, &epos, &neloc, nelen, 1);
brelse(epos.bh);
return (nelen >> 30);
}
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
struct kernel_lb_addr eloc, uint32_t elen)
{
struct extent_position oepos;
int adsize;
int8_t etype;
struct allocExtDesc *aed;
struct udf_inode_info *iinfo;
if (epos.bh) {
get_bh(epos.bh);
get_bh(epos.bh);
}
iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
adsize = 0;
oepos = epos;
if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
return -1;
while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
if (oepos.bh != epos.bh) {
oepos.block = epos.block;
brelse(oepos.bh);
get_bh(epos.bh);
oepos.bh = epos.bh;
oepos.offset = epos.offset - adsize;
}
}
memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
elen = 0;
if (epos.bh != oepos.bh) {
udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
udf_write_aext(inode, &oepos, &eloc, elen, 1);
udf_write_aext(inode, &oepos, &eloc, elen, 1);
if (!oepos.bh) {
iinfo->i_lenAlloc -= (adsize * 2);
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(oepos.bh->b_data,
oepos.offset - (2 * adsize));
else
udf_update_tag(oepos.bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(oepos.bh, inode);
}
} else {
udf_write_aext(inode, &oepos, &eloc, elen, 1);
if (!oepos.bh) {
iinfo->i_lenAlloc -= adsize;
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, -adsize);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(oepos.bh->b_data,
epos.offset - adsize);
else
udf_update_tag(oepos.bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(oepos.bh, inode);
}
}
brelse(epos.bh);
brelse(oepos.bh);
return (elen >> 30);
}
int8_t inode_bmap(struct inode *inode, sector_t block,
struct extent_position *pos, struct kernel_lb_addr *eloc,
uint32_t *elen, sector_t *offset)
{
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
loff_t lbcount = 0, bcount =
(loff_t) block << blocksize_bits;
int8_t etype;
struct udf_inode_info *iinfo;
iinfo = UDF_I(inode);
if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
pos->offset = 0;
pos->block = iinfo->i_location;
pos->bh = NULL;
}
*elen = 0;
do {
etype = udf_next_aext(inode, pos, eloc, elen, 1);
if (etype == -1) {
*offset = (bcount - lbcount) >> blocksize_bits;
iinfo->i_lenExtents = lbcount;
return -1;
}
lbcount += *elen;
} while (lbcount <= bcount);
/* update extent cache */
udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
*offset = (bcount + *elen - lbcount) >> blocksize_bits;
return etype;
}
long udf_block_map(struct inode *inode, sector_t block)
{
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
int ret;
down_read(&UDF_I(inode)->i_data_sem);
if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
(EXT_RECORDED_ALLOCATED >> 30))
ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
else
ret = 0;
up_read(&UDF_I(inode)->i_data_sem);
brelse(epos.bh);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
return udf_fixed_to_variable(ret);
else
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1624_0 |
crossvul-cpp_data_bad_5508_0 | /* crc32.c -- compute the CRC-32 of a data stream
* Copyright (C) 1995-2006, 2010, 2011, 2012 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster
* CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing
* tables for updating the shift register in one step with three exclusive-ors
* instead of four steps with four exclusive-ors. This results in about a
* factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
*/
/* @(#) $Id$ */
/*
Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore
protection on the static variables used to control the first-use generation
of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should
first call get_crc_table() to initialize the tables before allowing more than
one thread to use crc32().
DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h.
*/
#ifdef MAKECRCH
# include <stdio.h>
# ifndef DYNAMIC_CRC_TABLE
# define DYNAMIC_CRC_TABLE
# endif /* !DYNAMIC_CRC_TABLE */
#endif /* MAKECRCH */
#include "zutil.h" /* for STDC and FAR definitions */
#define local static
/* Definitions for doing the crc four data bytes at a time. */
#if !defined(NOBYFOUR) && defined(Z_U4)
# define BYFOUR
#endif
#ifdef BYFOUR
local unsigned long crc32_little OF((unsigned long,
const unsigned char FAR *, unsigned));
local unsigned long crc32_big OF((unsigned long,
const unsigned char FAR *, unsigned));
# define TBLS 8
#else
# define TBLS 1
#endif /* BYFOUR */
/* Local functions for crc concatenation */
local unsigned long gf2_matrix_times OF((unsigned long *mat,
unsigned long vec));
local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat));
local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2));
#ifdef DYNAMIC_CRC_TABLE
local volatile int crc_table_empty = 1;
local z_crc_t FAR crc_table[TBLS][256];
local void make_crc_table OF((void));
#ifdef MAKECRCH
local void write_table OF((FILE *, const z_crc_t FAR *));
#endif /* MAKECRCH */
/*
Generate tables for a byte-wise 32-bit CRC calculation on the polynomial:
x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
Polynomials over GF(2) are represented in binary, one bit per coefficient,
with the lowest powers in the most significant bit. Then adding polynomials
is just exclusive-or, and multiplying a polynomial by x is a right shift by
one. If we call the above polynomial p, and represent a byte as the
polynomial q, also with the lowest power in the most significant bit (so the
byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
where a mod b means the remainder after dividing a by b.
This calculation is done using the shift-register method of multiplying and
taking the remainder. The register is initialized to zero, and for each
incoming bit, x^32 is added mod p to the register if the bit is a one (where
x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
x (which is shifting right by one and adding x^32 mod p if the bit shifted
out is a one). We start with the highest power (least significant bit) of
q and repeat for all eight bits of q.
The first table is simply the CRC of all possible eight bit values. This is
all the information needed to generate CRCs on data a byte at a time for all
combinations of CRC register values and incoming bytes. The remaining tables
allow for word-at-a-time CRC calculation for both big-endian and little-
endian machines, where a word is four bytes.
*/
local void make_crc_table()
{
z_crc_t c;
int n, k;
z_crc_t poly; /* polynomial exclusive-or pattern */
/* terms of polynomial defining this crc (except x^32): */
static volatile int first = 1; /* flag to limit concurrent making */
static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
/* See if another task is already doing this (not thread-safe, but better
than nothing -- significantly reduces duration of vulnerability in
case the advice about DYNAMIC_CRC_TABLE is ignored) */
if (first) {
first = 0;
/* make exclusive-or pattern from polynomial (0xedb88320UL) */
poly = 0;
for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++)
poly |= (z_crc_t)1 << (31 - p[n]);
/* generate a crc for every 8-bit value */
for (n = 0; n < 256; n++) {
c = (z_crc_t)n;
for (k = 0; k < 8; k++)
c = c & 1 ? poly ^ (c >> 1) : c >> 1;
crc_table[0][n] = c;
}
#ifdef BYFOUR
/* generate crc for each value followed by one, two, and three zeros,
and then the byte reversal of those as well as the first table */
for (n = 0; n < 256; n++) {
c = crc_table[0][n];
crc_table[4][n] = ZSWAP32(c);
for (k = 1; k < 4; k++) {
c = crc_table[0][c & 0xff] ^ (c >> 8);
crc_table[k][n] = c;
crc_table[k + 4][n] = ZSWAP32(c);
}
}
#endif /* BYFOUR */
crc_table_empty = 0;
}
else { /* not first */
/* wait for the other guy to finish (not efficient, but rare) */
while (crc_table_empty)
;
}
#ifdef MAKECRCH
/* write out CRC tables to crc32.h */
{
FILE *out;
out = fopen("crc32.h", "w");
if (out == NULL) return;
fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n");
fprintf(out, " * Generated automatically by crc32.c\n */\n\n");
fprintf(out, "local const z_crc_t FAR ");
fprintf(out, "crc_table[TBLS][256] =\n{\n {\n");
write_table(out, crc_table[0]);
# ifdef BYFOUR
fprintf(out, "#ifdef BYFOUR\n");
for (k = 1; k < 8; k++) {
fprintf(out, " },\n {\n");
write_table(out, crc_table[k]);
}
fprintf(out, "#endif\n");
# endif /* BYFOUR */
fprintf(out, " }\n};\n");
fclose(out);
}
#endif /* MAKECRCH */
}
#ifdef MAKECRCH
local void write_table(out, table)
FILE *out;
const z_crc_t FAR *table;
{
int n;
for (n = 0; n < 256; n++)
fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ",
(unsigned long)(table[n]),
n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", "));
}
#endif /* MAKECRCH */
#else /* !DYNAMIC_CRC_TABLE */
/* ========================================================================
* Tables of CRC-32s of all single-byte values, made by make_crc_table().
*/
#include "crc32.h"
#endif /* DYNAMIC_CRC_TABLE */
/* =========================================================================
* This function can be used by asm versions of crc32()
*/
const z_crc_t FAR * ZEXPORT get_crc_table()
{
#ifdef DYNAMIC_CRC_TABLE
if (crc_table_empty)
make_crc_table();
#endif /* DYNAMIC_CRC_TABLE */
return (const z_crc_t FAR *)crc_table;
}
/* ========================================================================= */
#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8)
#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1
/* ========================================================================= */
unsigned long ZEXPORT crc32(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
uInt len;
{
if (buf == Z_NULL) return 0UL;
#ifdef DYNAMIC_CRC_TABLE
if (crc_table_empty)
make_crc_table();
#endif /* DYNAMIC_CRC_TABLE */
#ifdef BYFOUR
if (sizeof(void *) == sizeof(ptrdiff_t)) {
z_crc_t endian;
endian = 1;
if (*((unsigned char *)(&endian)))
return crc32_little(crc, buf, len);
else
return crc32_big(crc, buf, len);
}
#endif /* BYFOUR */
crc = crc ^ 0xffffffffUL;
while (len >= 8) {
DO8;
len -= 8;
}
if (len) do {
DO1;
} while (--len);
return crc ^ 0xffffffffUL;
}
#ifdef BYFOUR
/* ========================================================================= */
#define DOLIT4 c ^= *buf4++; \
c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
/* ========================================================================= */
local unsigned long crc32_little(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
unsigned len;
{
register z_crc_t c;
register const z_crc_t FAR *buf4;
c = (z_crc_t)crc;
c = ~c;
while (len && ((ptrdiff_t)buf & 3)) {
c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
len--;
}
buf4 = (const z_crc_t FAR *)(const void FAR *)buf;
while (len >= 32) {
DOLIT32;
len -= 32;
}
while (len >= 4) {
DOLIT4;
len -= 4;
}
buf = (const unsigned char FAR *)buf4;
if (len) do {
c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
} while (--len);
c = ~c;
return (unsigned long)c;
}
/* ========================================================================= */
#define DOBIG4 c ^= *++buf4; \
c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
/* ========================================================================= */
local unsigned long crc32_big(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
unsigned len;
{
register z_crc_t c;
register const z_crc_t FAR *buf4;
c = ZSWAP32((z_crc_t)crc);
c = ~c;
while (len && ((ptrdiff_t)buf & 3)) {
c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
len--;
}
buf4 = (const z_crc_t FAR *)(const void FAR *)buf;
buf4--;
while (len >= 32) {
DOBIG32;
len -= 32;
}
while (len >= 4) {
DOBIG4;
len -= 4;
}
buf4++;
buf = (const unsigned char FAR *)buf4;
if (len) do {
c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
} while (--len);
c = ~c;
return (unsigned long)(ZSWAP32(c));
}
#endif /* BYFOUR */
#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */
/* ========================================================================= */
local unsigned long gf2_matrix_times(mat, vec)
unsigned long *mat;
unsigned long vec;
{
unsigned long sum;
sum = 0;
while (vec) {
if (vec & 1)
sum ^= *mat;
vec >>= 1;
mat++;
}
return sum;
}
/* ========================================================================= */
local void gf2_matrix_square(square, mat)
unsigned long *square;
unsigned long *mat;
{
int n;
for (n = 0; n < GF2_DIM; n++)
square[n] = gf2_matrix_times(mat, mat[n]);
}
/* ========================================================================= */
local uLong crc32_combine_(crc1, crc2, len2)
uLong crc1;
uLong crc2;
z_off64_t len2;
{
int n;
unsigned long row;
unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */
unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */
/* degenerate case (also disallow negative lengths) */
if (len2 <= 0)
return crc1;
/* put operator for one zero bit in odd */
odd[0] = 0xedb88320UL; /* CRC-32 polynomial */
row = 1;
for (n = 1; n < GF2_DIM; n++) {
odd[n] = row;
row <<= 1;
}
/* put operator for two zero bits in even */
gf2_matrix_square(even, odd);
/* put operator for four zero bits in odd */
gf2_matrix_square(odd, even);
/* apply len2 zeros to crc1 (first square will put the operator for one
zero byte, eight zero bits, in even) */
do {
/* apply zeros operator for this bit of len2 */
gf2_matrix_square(even, odd);
if (len2 & 1)
crc1 = gf2_matrix_times(even, crc1);
len2 >>= 1;
/* if no more bits set, then done */
if (len2 == 0)
break;
/* another iteration of the loop with odd and even swapped */
gf2_matrix_square(odd, even);
if (len2 & 1)
crc1 = gf2_matrix_times(odd, crc1);
len2 >>= 1;
/* if no more bits set, then done */
} while (len2 != 0);
/* return combined crc */
crc1 ^= crc2;
return crc1;
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine(crc1, crc2, len2)
uLong crc1;
uLong crc2;
z_off_t len2;
{
return crc32_combine_(crc1, crc2, len2);
}
uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
uLong crc1;
uLong crc2;
z_off64_t len2;
{
return crc32_combine_(crc1, crc2, len2);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5508_0 |
crossvul-cpp_data_good_5640_0 | /*
* Performance events core code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include "internal.h"
#include <asm/irq_regs.h>
struct remote_function_call {
struct task_struct *p;
int (*func)(void *info);
void *info;
int ret;
};
static void remote_function(void *data)
{
struct remote_function_call *tfc = data;
struct task_struct *p = tfc->p;
if (p) {
tfc->ret = -EAGAIN;
if (task_cpu(p) != smp_processor_id() || !task_curr(p))
return;
}
tfc->ret = tfc->func(tfc->info);
}
/**
* task_function_call - call a function on the cpu on which a task runs
* @p: the task to evaluate
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This might
* be on the current CPU, which just calls the function directly
*
* returns: @func return value, or
* -ESRCH - when the process isn't running
* -EAGAIN - when the process moved away
*/
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
struct remote_function_call data = {
.p = p,
.func = func,
.info = info,
.ret = -ESRCH, /* No such (running) process */
};
if (task_curr(p))
smp_call_function_single(task_cpu(p), remote_function, &data, 1);
return data.ret;
}
/**
* cpu_function_call - call a function on the cpu
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func on the remote cpu.
*
* returns: @func return value or -ENXIO when the cpu is offline
*/
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
struct remote_function_call data = {
.p = NULL,
.func = func,
.info = info,
.ret = -ENXIO, /* No such CPU */
};
smp_call_function_single(cpu, remote_function, &data, 1);
return data.ret;
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP)
/*
* branch priv levels that need permission checks
*/
#define PERF_SAMPLE_BRANCH_PERM_PLM \
(PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
/*
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
/*
* max perf event sample rate
*/
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
return 0;
}
static atomic64_t perf_event_id;
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb);
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
{
return "pmu";
}
static inline u64 perf_clock(void)
{
return local_clock();
}
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx)
raw_spin_lock(&ctx->lock);
}
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
if (ctx)
raw_spin_unlock(&ctx->lock);
raw_spin_unlock(&cpuctx->ctx.lock);
}
#ifdef CONFIG_CGROUP_PERF
/*
* Must ensure cgroup is pinned (css_get) before calling
* this function. In other words, we cannot call this function
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
return container_of(task_subsys_state(task, perf_subsys_id),
struct perf_cgroup, css);
}
static inline bool
perf_cgroup_match(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
return !event->cgrp || event->cgrp == cpuctx->cgrp;
}
static inline bool perf_tryget_cgroup(struct perf_event *event)
{
return css_tryget(&event->cgrp->css);
}
static inline void perf_put_cgroup(struct perf_event *event)
{
css_put(&event->cgrp->css);
}
static inline void perf_detach_cgroup(struct perf_event *event)
{
perf_put_cgroup(event);
event->cgrp = NULL;
}
static inline int is_cgroup_event(struct perf_event *event)
{
return event->cgrp != NULL;
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
return t->time;
}
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
struct perf_cgroup_info *info;
u64 now;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
struct perf_cgroup *cgrp_out = cpuctx->cgrp;
if (cgrp_out)
__update_cgrp_time(cgrp_out);
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
struct perf_cgroup *cgrp;
/*
* ensure we access cgroup data only when needed and
* when we know the cgroup is pinned (css_get)
*/
if (!is_cgroup_event(event))
return;
cgrp = perf_cgroup_from_task(current);
/*
* Do not update time when cgroup is not active
*/
if (cgrp == event->cgrp)
__update_cgrp_time(event->cgrp);
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
struct perf_cgroup *cgrp;
struct perf_cgroup_info *info;
/*
* ctx->lock held by caller
* ensure we do not access cgroup data
* unless we have the cgroup pinned (css_get)
*/
if (!task || !ctx->nr_cgroups)
return;
cgrp = perf_cgroup_from_task(task);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
/*
* reschedule events based on the cgroup constraint of task.
*
* mode SWOUT : schedule out everything
* mode SWIN : schedule in based on cgroup for next
*/
void perf_cgroup_switch(struct task_struct *task, int mode)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/*
* disable interrupts to avoid geting nr_cgroup
* changes via __perf_event_disable(). Also
* avoids preemption.
*/
local_irq_save(flags);
/*
* we reschedule only in the presence of cgroup
* constrained events.
*/
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
continue; /* ensure we process each cpuctx once */
/*
* perf_cgroup_events says at least one
* context on this CPU has cgroup events.
*
* ctx->nr_cgroups reports the number of cgroup
* events for a context.
*/
if (cpuctx->ctx.nr_cgroups > 0) {
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
/*
* must not be done before ctxswout due
* to event_filter_match() in event_sched_out()
*/
cpuctx->cgrp = NULL;
}
if (mode & PERF_CGROUP_SWIN) {
WARN_ON_ONCE(cpuctx->cgrp);
/*
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
*/
cpuctx->cgrp = perf_cgroup_from_task(task);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
/*
* we come here when we know perf_cgroup_events > 0
*/
cgrp1 = perf_cgroup_from_task(task);
/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
cgrp2 = perf_cgroup_from_task(next);
/*
* only schedule out current cgroup events if we know
* that we are switching to a different cgroup. Otherwise,
* do no touch the cgroup events.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
/*
* we come here when we know perf_cgroup_events > 0
*/
cgrp1 = perf_cgroup_from_task(task);
/* prev can never be NULL */
cgrp2 = perf_cgroup_from_task(prev);
/*
* only need to schedule in cgroup events if we are changing
* cgroup during ctxsw. Cgroup events were not scheduled
* out of ctxsw out if that was not the case.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
struct perf_cgroup *cgrp;
struct cgroup_subsys_state *css;
struct fd f = fdget(fd);
int ret = 0;
if (!f.file)
return -EBADF;
css = cgroup_css_from_dir(f.file, perf_subsys_id);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
}
cgrp = container_of(css, struct perf_cgroup, css);
event->cgrp = cgrp;
/* must be done before we fput() the file */
if (!perf_tryget_cgroup(event)) {
event->cgrp = NULL;
ret = -ENOENT;
goto out;
}
/*
* all events in a group must monitor
* the same cgroup because a task belongs
* to only one perf cgroup at a time
*/
if (group_leader && group_leader->cgrp != cgrp) {
perf_detach_cgroup(event);
ret = -EINVAL;
}
out:
fdput(f);
return ret;
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
event->shadow_ctx_time = now - t->timestamp;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
/*
* when the current task's perf cgroup does not match
* the event's, we need to remember to call the
* perf_mark_enable() function the first time a task with
* a matching perf cgroup is scheduled in.
*/
if (is_cgroup_event(event) && !perf_cgroup_match(event))
event->cgrp_defer_enabled = 1;
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
if (!event->cgrp_defer_enabled)
return;
event->cgrp_defer_enabled = 0;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
sub->cgrp_defer_enabled = 0;
}
}
}
#else /* !CONFIG_CGROUP_PERF */
static inline bool
perf_cgroup_match(struct perf_event *event)
{
return true;
}
static inline void perf_detach_cgroup(struct perf_event *event)
{}
static inline int is_cgroup_event(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
}
static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
return -EINVAL;
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
}
void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
}
#endif
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!(*count)++)
pmu->pmu_disable(pmu);
}
void perf_pmu_enable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!--(*count))
pmu->pmu_enable(pmu);
}
static DEFINE_PER_CPU(struct list_head, rotation_list);
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
*/
static void perf_pmu_rotate_start(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct list_head *head = &__get_cpu_var(rotation_list);
WARN_ON(!irqs_disabled());
if (list_empty(&cpuctx->rotation_list))
list_add(&cpuctx->rotation_list, head);
}
static void get_ctx(struct perf_event_context *ctx)
{
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task)
put_task_struct(ctx->task);
kfree_rcu(ctx, rcu_head);
}
}
static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
put_ctx(ctx->parent_ctx);
ctx->parent_ctx = NULL;
}
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
/*
* only top level events have the pid namespace they were created in
*/
if (event->parent)
event = event->parent;
return task_tgid_nr_ns(p, event->ns);
}
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
/*
* only top level events have the pid namespace they were created in
*/
if (event->parent)
event = event->parent;
return task_pid_nr_ns(p, event->ns);
}
/*
* If we inherit events we want to return the parent event id
* to userspace.
*/
static u64 primary_event_id(struct perf_event *event)
{
u64 id = event->id;
if (event->parent)
id = event->parent->id;
return id;
}
/*
* Get the perf_event_context for a task and lock it.
* This has to cope with with the fact that until it is locked,
* the context could get moved to another task.
*/
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
struct perf_event_context *ctx;
rcu_read_lock();
retry:
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
/*
* If this context is a clone of another, it might
* get swapped for another underneath us by
* perf_event_task_sched_out, though the
* rcu_read_lock() protects us from any context
* getting freed. Lock the context and check if it
* got swapped before we could get the lock, and retry
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
rcu_read_unlock();
return ctx;
}
/*
* Get the context for a task and increment its pin_count so it
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
* Update the record of the current time in a context.
*/
static void update_context_time(struct perf_event_context *ctx)
{
u64 now = perf_clock();
ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
return ctx ? ctx->time : 0;
}
/*
* Update the total_time_enabled and total_time_running fields for a event.
* The caller of this function needs to hold the ctx->lock.
*/
static void update_event_times(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
u64 run_end;
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
/*
* in cgroup mode, time_enabled represents
* the time the event was enabled AND active
* tasks were in the monitored cgroup. This is
* independent of the activity of the context as
* there may be a mix of cgroup and non-cgroup events.
*
* That is why we treat cgroup events differently
* here.
*/
if (is_cgroup_event(event))
run_end = perf_cgroup_event_time(event);
else if (ctx->is_active)
run_end = ctx->time;
else
run_end = event->tstamp_stopped;
event->total_time_enabled = run_end - event->tstamp_enabled;
if (event->state == PERF_EVENT_STATE_INACTIVE)
run_end = event->tstamp_stopped;
else
run_end = perf_event_time(event);
event->total_time_running = run_end - event->tstamp_running;
}
/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
struct perf_event *event;
update_event_times(leader);
list_for_each_entry(event, &leader->sibling_list, group_entry)
update_event_times(event);
}
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
if (event->attr.pinned)
return &ctx->pinned_groups;
else
return &ctx->flexible_groups;
}
/*
* Add a event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
event->attach_state |= PERF_ATTACH_CONTEXT;
/*
* If we're a stand alone event or group leader, we go to the context
* list, group events are kept attached to the group so that
* perf_group_detach can, at all times, locate all siblings.
*/
if (event->group_leader == event) {
struct list_head *list;
if (is_software_event(event))
event->group_flags |= PERF_GROUP_SOFTWARE;
list = ctx_group_list(event, ctx);
list_add_tail(&event->group_entry, list);
}
if (is_cgroup_event(event))
ctx->nr_cgroups++;
if (has_branch_stack(event))
ctx->nr_branch_stack++;
list_add_rcu(&event->event_entry, &ctx->event_list);
if (!ctx->nr_events)
perf_pmu_rotate_start(ctx->pmu);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
}
/*
* Initialize event state based on the perf_event_attr::disabled.
*/
static inline void perf_event__state_init(struct perf_event *event)
{
event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
PERF_EVENT_STATE_INACTIVE;
}
/*
* Called at perf_event creation and when events are attached/detached from a
* group.
*/
static void perf_event__read_size(struct perf_event *event)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
nr += event->group_leader->nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
}
static void perf_event__header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
perf_event__read_size(event);
if (sample_type & PERF_SAMPLE_IP)
size += sizeof(data->ip);
if (sample_type & PERF_SAMPLE_ADDR)
size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_PERIOD)
size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_READ)
size += event->read_size;
event->header_size = size;
}
static void perf_event__id_header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
if (sample_type & PERF_SAMPLE_TID)
size += sizeof(data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu_entry);
event->id_header_size = size;
}
static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
/*
* We can have double attach due to group movement in perf_event_open.
*/
if (event->attach_state & PERF_ATTACH_GROUP)
return;
event->attach_state |= PERF_ATTACH_GROUP;
if (group_leader == event)
return;
if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
!is_software_event(event))
group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
list_add_tail(&event->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
perf_event__header_size(group_leader);
list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
perf_event__header_size(pos);
}
/*
* Remove a event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_CONTEXT))
return;
event->attach_state &= ~PERF_ATTACH_CONTEXT;
if (is_cgroup_event(event)) {
ctx->nr_cgroups--;
cpuctx = __get_cpu_context(ctx);
/*
* if there are no more cgroup events
* then cler cgrp to avoid stale pointer
* in update_cgrp_time_from_cpuctx()
*/
if (!ctx->nr_cgroups)
cpuctx->cgrp = NULL;
}
if (has_branch_stack(event))
ctx->nr_branch_stack--;
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
list_del_rcu(&event->event_entry);
if (event->group_leader == event)
list_del_init(&event->group_entry);
update_group_times(event);
/*
* If event was in error state, then keep it
* that way, otherwise bogus counts will be
* returned on read(). The only way to get out
* of error state is by explicit re-enabling
* of the event
*/
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
}
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *sibling, *tmp;
struct list_head *list = NULL;
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_GROUP))
return;
event->attach_state &= ~PERF_ATTACH_GROUP;
/*
* If this is a sibling, remove it from its group.
*/
if (event->group_leader != event) {
list_del_init(&event->group_entry);
event->group_leader->nr_siblings--;
goto out;
}
if (!list_empty(&event->group_entry))
list = &event->group_entry;
/*
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
* to whatever list we are on.
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
if (list)
list_move_tail(&sibling->group_entry, list);
sibling->group_leader = sibling;
/* Inherit group flags from the previous leader */
sibling->group_flags = event->group_flags;
}
out:
perf_event__header_size(event->group_leader);
list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
perf_event__header_size(tmp);
}
static inline int
event_filter_match(struct perf_event *event)
{
return (event->cpu == -1 || event->cpu == smp_processor_id())
&& perf_cgroup_match(event);
}
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
u64 delta;
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
* maintained, otherwise bogus information is return
* via read() for time_enabled, time_running:
*/
if (event->state == PERF_EVENT_STATE_INACTIVE
&& !event_filter_match(event)) {
delta = tstamp - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = tstamp;
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
event->tstamp_stopped = tstamp;
event->pmu->del(event, 0);
event->oncpu = -1;
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event;
int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
/*
* Schedule out siblings (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
/*
* Cross CPU call to remove a performance event
*
* We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
static int __perf_remove_from_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
list_del_event(event, ctx);
if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
ctx->is_active = 0;
cpuctx->task_ctx = NULL;
}
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
* call when the task is on a CPU.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This is OK when called from perf_release since
* that only calls us on the top-level context, which can't be a clone.
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
static void perf_remove_from_context(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
lockdep_assert_held(&ctx->mutex);
if (!task) {
/*
* Per cpu events are removed via an smp call and
* the removal is always successful.
*/
cpu_function_call(event->cpu, __perf_remove_from_context, event);
return;
}
retry:
if (!task_function_call(task, __perf_remove_from_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If we failed to find a running task, but find the context active now
* that we've acquired the ctx->lock, retry.
*/
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
* Since the task isn't running, its safe to remove the event, us
* holding the ctx->lock ensures the task won't get scheduled in.
*/
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Cross CPU call to disable a performance event
*/
int __perf_event_disable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a per-task event, need to check whether this
* event's task is the current task on this cpu.
*
* Can trigger due to concurrent perf_event_context_sched_out()
* flipping contexts around.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return -EINVAL;
raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.
* If it is in error state, leave it in error state.
*/
if (event->state >= PERF_EVENT_STATE_INACTIVE) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
update_group_times(event);
if (event == event->group_leader)
group_sched_out(event, cpuctx, ctx);
else
event_sched_out(event, cpuctx, ctx);
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
* Disable a event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisifed when called through
* perf_event_for_each_child or perf_event_for_each because they
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in sync_child_event.
* When called from perf_pending_event it's OK because event->ctx
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Disable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_disable, event);
return;
}
retry:
if (!task_function_call(task, __perf_event_disable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
raw_spin_unlock_irq(&ctx->lock);
/*
* Reload the task pointer, it might have been changed by
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
goto retry;
}
/*
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
*/
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
{
/*
* use the correct time source for the time snapshot
*
* We could get by without this by leveraging the
* fact that to get to this function, the caller
* has most likely already called update_context_time()
* and update_cgrp_time_xx() and thus both timestamp
* are identical (or very close). Given that tstamp is,
* already adjusted for cgroup, we could say that:
* tstamp - ctx->timestamp
* is equivalent to
* tstamp - cgrp->timestamp.
*
* Then, in perf_output_read(), the calculation would
* work with no changes because:
* - event is guaranteed scheduled in
* - no scheduled out in between
* - thus the timestamp would be the same
*
* But this is a bit hairy.
*
* So instead, we have an explicit cgroup call to remain
* within the time time source all along. We believe it
* is cleaner and simpler to understand.
*/
if (is_cgroup_event(event))
perf_cgroup_set_shadow_time(event, tstamp);
else
event->shadow_ctx_time = tstamp - ctx->timestamp;
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static int
event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
event->state = PERF_EVENT_STATE_ACTIVE;
event->oncpu = smp_processor_id();
/*
* Unthrottle events, since we scheduled we might have missed several
* ticks already, also for a heavily scheduling task there is little
* guarantee it'll get a tick in a timely manner.
*/
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
perf_log_throttle(event, 1);
event->hw.interrupts = 0;
}
/*
* The new state must be visible before we turn it on in the hardware:
*/
smp_wmb();
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
return -EAGAIN;
}
event->tstamp_running += tstamp - event->tstamp_stopped;
perf_set_shadow_time(event, ctx, tstamp);
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
return 0;
}
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu;
u64 now = ctx->time;
bool simulate = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
pmu->start_txn(pmu);
if (event_sched_in(group_event, cpuctx, ctx)) {
pmu->cancel_txn(pmu);
return -EAGAIN;
}
/*
* Schedule in siblings as one group (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
}
if (!pmu->commit_txn(pmu))
return 0;
group_error:
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
* The events up to the failed event are scheduled out normally,
* tstamp_stopped will be updated.
*
* The failed events and the remaining siblings need to have
* their timings updated as if they had gone thru event_sched_in()
* and event_sched_out(). This is required to get consistent timings
* across the group. This also takes care of the case where the group
* could never be scheduled by ensuring tstamp_stopped is set to mark
* the time the event was actually stopped, such that time delta
* calculation in update_event_times() is correct.
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
simulate = true;
if (simulate) {
event->tstamp_running += now - event->tstamp_stopped;
event->tstamp_stopped = now;
} else {
event_sched_out(event, cpuctx, ctx);
}
}
event_sched_out(group_event, cpuctx, ctx);
pmu->cancel_txn(pmu);
return -EAGAIN;
}
/*
* Work out whether we can put this event group on the CPU now.
*/
static int group_can_go_on(struct perf_event *event,
struct perf_cpu_context *cpuctx,
int can_add_hw)
{
/*
* Groups consisting entirely of software events can always go on.
*/
if (event->group_flags & PERF_GROUP_SOFTWARE)
return 1;
/*
* If an exclusive group is already on, no other hardware
* events can go on.
*/
if (cpuctx->exclusive)
return 0;
/*
* If this group is exclusive and there are already
* events on the CPU, it can't go on.
*/
if (event->attr.exclusive && cpuctx->active_oncpu)
return 0;
/*
* Otherwise, try to add it if all previous groups were able
* to go on.
*/
return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
list_add_event(event, ctx);
perf_group_attach(event);
event->tstamp_enabled = tstamp;
event->tstamp_running = tstamp;
event->tstamp_stopped = tstamp;
}
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
struct task_struct *task)
{
cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
/*
* Cross CPU call to install and enable a performance event
*
* Must be called with ctx->mutex held
*/
static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
struct task_struct *task = current;
perf_ctx_lock(cpuctx, task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
/*
* If there was an active task_ctx schedule it out.
*/
if (task_ctx)
task_ctx_sched_out(task_ctx);
/*
* If the context we're installing events in is not the
* active task_ctx, flip them.
*/
if (ctx->task && task_ctx != ctx) {
if (task_ctx)
raw_spin_unlock(&task_ctx->lock);
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
}
if (task_ctx) {
cpuctx->task_ctx = task_ctx;
task = task_ctx->task;
}
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
update_context_time(ctx);
/*
* update cgrp time only if current cgrp
* matches event->cgrp. Must be done before
* calling add_event_to_ctx()
*/
update_cgrp_time_from_event(event);
add_event_to_ctx(event, ctx);
/*
* Schedule everything back in
*/
perf_event_sched_in(cpuctx, task_ctx, task);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, task_ctx);
return 0;
}
/*
* Attach a performance event to a context
*
* First we add the event to the list with the hardware enable bit
* in event->hw_config cleared.
*
* If the event is attached to a task which is on a CPU we use a smp
* call to enable it in the task context. The task might have been
* scheduled away, but we check this in the smp call again.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
struct task_struct *task = ctx->task;
lockdep_assert_held(&ctx->mutex);
event->ctx = ctx;
if (event->cpu != -1)
event->cpu = cpu;
if (!task) {
/*
* Per cpu events are installed via an smp call and
* the install is always successful.
*/
cpu_function_call(cpu, __perf_install_in_context, event);
return;
}
retry:
if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If we failed to find a running task, but find the context active now
* that we've acquired the ctx->lock, retry.
*/
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
* Since the task isn't running, its safe to add the event, us holding
* the ctx->lock ensures the task won't get scheduled in.
*/
add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Put a event into inactive state and update time fields.
* Enabling the leader of a group effectively enables all
* the group members that aren't explicitly disabled, so we
* have to update their ->tstamp_enabled also.
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
event->state = PERF_EVENT_STATE_INACTIVE;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE)
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
}
}
/*
* Cross CPU call to enable a performance event
*/
static int __perf_event_enable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err;
if (WARN_ON_ONCE(!ctx->is_active))
return -EINVAL;
raw_spin_lock(&ctx->lock);
update_context_time(ctx);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto unlock;
/*
* set current task's cgroup time reference point
*/
perf_cgroup_set_timestamp(current, ctx);
__perf_event_mark_enabled(event);
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
goto unlock;
}
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
if (!group_can_go_on(event, cpuctx, 1)) {
err = -EEXIST;
} else {
if (event == leader)
err = group_sched_in(event, cpuctx, ctx);
else
err = event_sched_in(event, cpuctx, ctx);
}
if (err) {
/*
* If this event can't go on and it's part of a
* group, then the whole group has to come off.
*/
if (leader != event)
group_sched_out(leader, cpuctx, ctx);
if (leader->attr.pinned) {
update_group_times(leader);
leader->state = PERF_EVENT_STATE_ERROR;
}
}
unlock:
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
* Enable a event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Enable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_enable, event);
return;
}
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/*
* If the event is in error state, clear that first.
* That way, if we see the event in error state below, we
* know that it has gone back into error state, as distinct
* from the task having been scheduled away before the
* cross-call arrived.
*/
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
retry:
if (!ctx->is_active) {
__perf_event_mark_enabled(event);
goto out;
}
raw_spin_unlock_irq(&ctx->lock);
if (!task_function_call(task, __perf_event_enable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
* we need to retry the cross-call.
*/
if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
/*
* task could have been flipped by a concurrent
* perf_event_context_sched_out()
*/
task = ctx->task;
goto retry;
}
out:
raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
*/
if (event->attr.inherit || !is_sampling_event(event))
return -EINVAL;
atomic_add(refresh, &event->event_limit);
perf_event_enable(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
struct perf_event *event;
int is_active = ctx->is_active;
ctx->is_active &= ~event_type;
if (likely(!ctx->nr_events))
return;
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
if (!ctx->nr_active)
return;
perf_pmu_disable(ctx->pmu);
if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
perf_pmu_enable(ctx->pmu);
}
/*
* Test whether two contexts are equivalent, i.e. whether they
* have both been cloned from the same version of the same context
* and they both have the same number of enabled events.
* If the number of enabled events is the same, then the set
* of enabled events should be the same, because these are both
* inherited contexts, therefore we can't access individual events
* in them directly with an fd; we can only enable/disable all
* events via prctl, or enable/disable all events in a family
* via ioctl, which will have the same effect on both contexts.
*/
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{
return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
&& ctx1->parent_gen == ctx2->parent_gen
&& !ctx1->pin_count && !ctx2->pin_count;
}
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{
u64 value;
if (!event->attr.inherit_stat)
return;
/*
* Update the event value, we cannot use perf_event_read()
* because we're in the middle of a context switch and have IRQs
* disabled, which upsets smp_call_function_single(), however
* we know the event must be on the current CPU, therefore we
* don't need to use it.
*/
switch (event->state) {
case PERF_EVENT_STATE_ACTIVE:
event->pmu->read(event);
/* fall-through */
case PERF_EVENT_STATE_INACTIVE:
update_event_times(event);
break;
default:
break;
}
/*
* In order to keep per-task stats reliable we need to flip the event
* values when we flip the contexts.
*/
value = local64_read(&next_event->count);
value = local64_xchg(&event->count, value);
local64_set(&next_event->count, value);
swap(event->total_time_enabled, next_event->total_time_enabled);
swap(event->total_time_running, next_event->total_time_running);
/*
* Since we swizzled the values, update the user visible data too.
*/
perf_event_update_userpage(event);
perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
static void perf_event_sync_stat(struct perf_event_context *ctx,
struct perf_event_context *next_ctx)
{
struct perf_event *event, *next_event;
if (!ctx->nr_stat)
return;
update_context_time(ctx);
event = list_first_entry(&ctx->event_list,
struct perf_event, event_entry);
next_event = list_first_entry(&next_ctx->event_list,
struct perf_event, event_entry);
while (&event->event_entry != &ctx->event_list &&
&next_event->event_entry != &next_ctx->event_list) {
__perf_event_sync_stat(event, next_event);
event = list_next_entry(event, event_entry);
next_event = list_next_entry(next_event, event_entry);
}
}
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
struct task_struct *next)
{
struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
struct perf_cpu_context *cpuctx;
int do_switch = 1;
if (likely(!ctx))
return;
cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
rcu_read_lock();
parent = rcu_dereference(ctx->parent_ctx);
next_ctx = next->perf_event_ctxp[ctxn];
if (parent && next_ctx &&
rcu_dereference(next_ctx->parent_ctx) == parent) {
/*
* Looks like the two contexts are clones, so we might be
* able to optimize the context switch. We lock both
* contexts and check that they are clones under the
* lock (including re-checking that neither has been
* uncloned in the meantime). It doesn't matter which
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
* wrt to rcu_dereference() of perf_event_ctxp
*/
task->perf_event_ctxp[ctxn] = next_ctx;
next->perf_event_ctxp[ctxn] = ctx;
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
}
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
if (do_switch) {
raw_spin_lock(&ctx->lock);
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
raw_spin_unlock(&ctx->lock);
}
}
#define for_each_task_context_nr(ctxn) \
for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
/*
* Called from scheduler to remove the events of the current task,
* with interrupts disabled.
*
* We stop each event and update the event value in event->count.
*
* This does not protect us against NMI, but disable()
* sets the disabled bit in the control field of event _before_
* accessing the event control register. If a NMI hits, then it will
* not restart the event.
*/
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{
int ctxn;
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
/*
* if cgroup events exist on this CPU, then we need
* to check if we have to switch out PMU state.
* cgroup event are system-wide mode only
*/
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}
static void task_ctx_sched_out(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
}
/*
* Called with IRQs disabled
*/
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, 1))
group_sched_in(event, cpuctx, ctx);
/*
* If this pinned group hasn't been scheduled,
* put it in error state.
*/
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_ERROR;
}
}
}
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
int can_add_hw = 1;
list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
/* Ignore events in OFF or ERROR state */
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
/*
* Listen to the 'cpu' scheduling filter constraint
* of events:
*/
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
}
}
}
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
u64 now;
int is_active = ctx->is_active;
ctx->is_active |= event_type;
if (likely(!ctx->nr_events))
return;
now = perf_clock();
ctx->timestamp = now;
perf_cgroup_set_timestamp(task, ctx);
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
ctx_flexible_sched_in(ctx, cpuctx);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
struct perf_event_context *ctx = &cpuctx->ctx;
ctx_sched_in(ctx, cpuctx, event_type, task);
}
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
* cpu flexible, task flexible.
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx->nr_events)
cpuctx->task_ctx = ctx;
perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
/*
* Since these rotations are per-cpu, we need to ensure the
* cpu-context we got scheduled on is actually rotating.
*/
perf_pmu_rotate_start(ctx->pmu);
}
/*
* When sampling the branck stack in system-wide, it may be necessary
* to flush the stack on context switch. This happens when the branch
* stack does not tag its entries with the pid of the current task.
* Otherwise it becomes impossible to associate a branch entry with a
* task. This ambiguity is more likely to appear when the branch stack
* supports priv level filtering and the user sets it to monitor only
* at the user level (which could be a useful measurement in system-wide
* mode). In that case, the risk is high of having a branch stack with
* branch from multiple tasks. Flushing may mean dropping the existing
* entries or stashing them somewhere in the PMU specific code layer.
*
* This function provides the context switch callback to the lower code
* layer. It is invoked ONLY when there is at least one system-wide context
* with at least one active event using taken branch sampling.
*/
static void perf_branch_stack_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/* no need to flush branch stack if not changing task */
if (prev == task)
return;
local_irq_save(flags);
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
/*
* check if the context has at least one
* event using PERF_SAMPLE_BRANCH_STACK
*/
if (cpuctx->ctx.nr_branch_stack > 0
&& pmu->flush_branch_stack) {
pmu = cpuctx->ctx.pmu;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->flush_branch_stack();
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
/*
* Called from scheduler to add the events of the current task
* with interrupts disabled.
*
* We restore the event value and then enable it.
*
* This does not protect us against NMI, but enable()
* sets the enabled bit in the control field of event _before_
* accessing the event control register. If a NMI hits, then it will
* keep the event running.
*/
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_event_context *ctx;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (likely(!ctx))
continue;
perf_event_context_sched_in(ctx, task);
}
/*
* if cgroup events exist on this CPU, then we need
* to check if we have to switch in PMU state.
* cgroup event are system-wide mode only
*/
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
/* check for system-wide branch_stack events */
if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
u64 sec = NSEC_PER_SEC;
u64 divisor, dividend;
int count_fls, nsec_fls, frequency_fls, sec_fls;
count_fls = fls64(count);
nsec_fls = fls64(nsec);
frequency_fls = fls64(frequency);
sec_fls = 30;
/*
* We got @count in @nsec, with a target of sample_freq HZ
* the target period becomes:
*
* @count * 10^9
* period = -------------------
* @nsec * sample_freq
*
*/
/*
* Reduce accuracy by one bit such that @a and @b converge
* to a similar magnitude.
*/
#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
a##_fls--; \
} else { \
b >>= 1; \
b##_fls--; \
} \
} while (0)
/*
* Reduce accuracy until either term fits in a u64, then proceed with
* the other, so that finally we can do a u64/u64 division.
*/
while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
REDUCE_FLS(sec, count);
}
if (count_fls + sec_fls > 64) {
divisor = nsec * frequency;
while (count_fls + sec_fls > 64) {
REDUCE_FLS(count, sec);
divisor >>= 1;
}
dividend = count * sec;
} else {
dividend = count * sec;
while (nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
dividend >>= 1;
}
divisor = nsec * frequency;
}
if (!divisor)
return dividend;
return div64_u64(dividend, divisor);
}
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
s64 delta;
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8; /* low pass filter */
sample_period = hwc->sample_period + delta;
if (!sample_period)
sample_period = 1;
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
if (disable)
event->pmu->stop(event, PERF_EF_UPDATE);
local64_set(&hwc->period_left, 0);
if (disable)
event->pmu->start(event, PERF_EF_RELOAD);
}
}
/*
* combine freq adjustment with unthrottling to avoid two passes over the
* events. At the same time, make sure, having freq events does not change
* the rate of unthrottling as that would introduce bias.
*/
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
int needs_unthr)
{
struct perf_event *event;
struct hw_perf_event *hwc;
u64 now, period = TICK_NSEC;
s64 delta;
/*
* only need to iterate over all events iff:
* - context have events in frequency mode (needs freq adjust)
* - there are events to unthrottle on this cpu
*/
if (!(ctx->nr_freq || needs_unthr))
return;
raw_spin_lock(&ctx->lock);
perf_pmu_disable(ctx->pmu);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
if (!event_filter_match(event))
continue;
hwc = &event->hw;
if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
}
if (!event->attr.freq || !event->attr.sample_freq)
continue;
/*
* stop the event and update event->count
*/
event->pmu->stop(event, PERF_EF_UPDATE);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
/*
* restart the event
* reload only if value has changed
* we have stopped the event so tell that
* to perf_adjust_period() to avoid stopping it
* twice.
*/
if (delta > 0)
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
}
perf_pmu_enable(ctx->pmu);
raw_spin_unlock(&ctx->lock);
}
/*
* Round-robin a context's events:
*/
static void rotate_ctx(struct perf_event_context *ctx)
{
/*
* Rotate the first entry last of non-pinned groups. Rotation might be
* disabled by the inheritance code.
*/
if (!ctx->rotate_disable)
list_rotate_left(&ctx->flexible_groups);
}
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
*/
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
int rotate = 0, remove = 1;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
}
ctx = cpuctx->task_ctx;
if (ctx && ctx->nr_events) {
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
}
if (!rotate)
goto done;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
perf_event_sched_in(cpuctx, ctx, current);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
}
void perf_event_task_tick(void)
{
struct list_head *head = &__get_cpu_var(rotation_list);
struct perf_cpu_context *cpuctx, *tmp;
struct perf_event_context *ctx;
int throttled;
WARN_ON(!irqs_disabled());
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
ctx = &cpuctx->ctx;
perf_adjust_freq_unthr_context(ctx, throttled);
ctx = cpuctx->task_ctx;
if (ctx)
perf_adjust_freq_unthr_context(ctx, throttled);
if (cpuctx->jiffies_interval == 1 ||
!(jiffies % cpuctx->jiffies_interval))
perf_rotate_context(cpuctx);
}
}
static int event_enable_on_exec(struct perf_event *event,
struct perf_event_context *ctx)
{
if (!event->attr.enable_on_exec)
return 0;
event->attr.enable_on_exec = 0;
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
__perf_event_mark_enabled(event);
return 1;
}
/*
* Enable all of a task's events that have been marked enable-on-exec.
* This expects task == current.
*/
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
struct perf_event *event;
unsigned long flags;
int enabled = 0;
int ret;
local_irq_save(flags);
if (!ctx || !ctx->nr_events)
goto out;
/*
* We must ctxsw out cgroup events to avoid conflict
* when invoking perf_task_event_sched_in() later on
* in this function. Otherwise we end up trying to
* ctxswin cgroup events which are already scheduled
* in.
*/
perf_cgroup_sched_out(current, NULL);
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(ctx);
list_for_each_entry(event, &ctx->event_list, event_entry) {
ret = event_enable_on_exec(event, ctx);
if (ret)
enabled = 1;
}
/*
* Unclone this context if we enabled any event.
*/
if (enabled)
unclone_ctx(ctx);
raw_spin_unlock(&ctx->lock);
/*
* Also calls ctxswin for cgroup events, if any:
*/
perf_event_context_sched_in(ctx, ctx->task);
out:
local_irq_restore(flags);
}
/*
* Cross CPU call to read the hardware event
*/
static void __perf_event_read(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a task context, we need to check whether it is
* the current task context of this cpu. If not it has been
* scheduled out before the smp call arrived. In that case
* event->count would have been updated to a recent sample
* when the event was scheduled out.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return;
raw_spin_lock(&ctx->lock);
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
update_event_times(event);
if (event->state == PERF_EVENT_STATE_ACTIVE)
event->pmu->read(event);
raw_spin_unlock(&ctx->lock);
}
static inline u64 perf_event_count(struct perf_event *event)
{
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
static u64 perf_event_read(struct perf_event *event)
{
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
smp_call_function_single(event->oncpu,
__perf_event_read, event, 1);
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* may read while context is not active
* (e.g., thread is blocked), in that case
* we cannot update context time
*/
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return perf_event_count(event);
}
/*
* Initialize the perf_event context in a task_struct:
*/
static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
}
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
struct perf_event_context *ctx;
ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
if (!ctx)
return NULL;
__perf_event_init_context(ctx);
if (task) {
ctx->task = task;
get_task_struct(task);
}
ctx->pmu = pmu;
return ctx;
}
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
int err;
rcu_read_lock();
if (!vpid)
task = current;
else
task = find_task_by_vpid(vpid);
if (task)
get_task_struct(task);
rcu_read_unlock();
if (!task)
return ERR_PTR(-ESRCH);
/* Reuse ptrace permission checks for now. */
err = -EACCES;
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto errout;
return task;
errout:
put_task_struct(task);
return ERR_PTR(err);
}
/*
* Returns a matching context with refcount and pincount.
*/
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
struct perf_event_context *ctx;
struct perf_cpu_context *cpuctx;
unsigned long flags;
int ctxn, err;
if (!task) {
/* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/*
* We could be clever and allow to attach a event to an
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
if (!cpu_online(cpu))
return ERR_PTR(-ENODEV);
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
++ctx->pin_count;
return ctx;
}
err = -EINVAL;
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto errout;
retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
unclone_ctx(ctx);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
} else {
ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
if (!ctx)
goto errout;
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
* If it has already passed perf_event_exit_task().
* we must see PF_EXITING, it takes this mutex too.
*/
if (task->flags & PF_EXITING)
err = -ESRCH;
else if (task->perf_event_ctxp[ctxn])
err = -EAGAIN;
else {
get_ctx(ctx);
++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
}
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
put_ctx(ctx);
if (err == -EAGAIN)
goto retry;
goto errout;
}
}
return ctx;
errout:
return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);
static void free_event_rcu(struct rcu_head *head)
{
struct perf_event *event;
event = container_of(head, struct perf_event, rcu_head);
if (event->ns)
put_pid_ns(event->ns);
perf_event_free_filter(event);
kfree(event);
}
static void ring_buffer_put(struct ring_buffer *rb);
static void free_event(struct perf_event *event)
{
irq_work_sync(&event->pending);
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
static_key_slow_dec_deferred(&perf_sched_events);
}
if (has_branch_stack(event)) {
static_key_slow_dec_deferred(&perf_sched_events);
/* is system-wide event */
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_dec(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
if (event->rb) {
ring_buffer_put(event->rb);
event->rb = NULL;
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->destroy)
event->destroy(event);
if (event->ctx)
put_ctx(event->ctx);
call_rcu(&event->rcu_head, free_event_rcu);
}
int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
*
* 1) there is a lock recursion from perf_event_exit_task
* see the comment there.
*
* 2) there is a lock-inversion with mmap_sem through
* perf_event_read_group(), which takes faults while
* holding ctx->mutex, however this is called after
* the last filedesc died, so there is no possibility
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
raw_spin_unlock_irq(&ctx->lock);
perf_remove_from_context(event);
mutex_unlock(&ctx->mutex);
free_event(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
* Called when the last reference to the file is gone.
*/
static void put_event(struct perf_event *event)
{
struct task_struct *owner;
if (!atomic_long_dec_and_test(&event->refcount))
return;
rcu_read_lock();
owner = ACCESS_ONCE(event->owner);
/*
* Matches the smp_wmb() in perf_event_exit_task(). If we observe
* !owner it means the list deletion is complete and we can indeed
* free this event, otherwise we need to serialize on
* owner->perf_event_mutex.
*/
smp_read_barrier_depends();
if (owner) {
/*
* Since delayed_put_task_struct() also drops the last
* task reference we can safely take a new reference
* while holding the rcu_read_lock().
*/
get_task_struct(owner);
}
rcu_read_unlock();
if (owner) {
mutex_lock(&owner->perf_event_mutex);
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
* ensured they're done, and we can proceed with freeing the
* event.
*/
if (event->owner)
list_del_init(&event->owner_entry);
mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner);
}
perf_event_release_kernel(event);
}
static int perf_release(struct inode *inode, struct file *file)
{
put_event(file->private_data);
return 0;
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event *child;
u64 total = 0;
*enabled = 0;
*running = 0;
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
mutex_unlock(&event->child_mutex);
return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
u64 values[5];
u64 count, enabled, running;
mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
values[n++] = count;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
goto unlock;
ret = size;
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
values[n++] = perf_event_read_value(sub, &enabled, &running);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
size = n * sizeof(u64);
if (copy_to_user(buf + ret, values, size)) {
ret = -EFAULT;
goto unlock;
}
ret += size;
}
unlock:
mutex_unlock(&ctx->mutex);
return ret;
}
static int perf_event_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 enabled, running;
u64 values[4];
int n = 0;
values[n++] = perf_event_read_value(event, &enabled, &running);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
if (copy_to_user(buf, values, n * sizeof(u64)))
return -EFAULT;
return n * sizeof(u64);
}
/*
* Read the performance event - simple non blocking version for now
*/
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
u64 read_format = event->attr.read_format;
int ret;
/*
* Return end-of-file for a read on a event that is in
* error state (i.e. because it was pinned but it couldn't be
* scheduled on to the CPU at some point).
*/
if (event->state == PERF_EVENT_STATE_ERROR)
return 0;
if (count < event->read_size)
return -ENOSPC;
WARN_ON_ONCE(event->ctx->parent_ctx);
if (read_format & PERF_FORMAT_GROUP)
ret = perf_event_read_group(event, read_format, buf);
else
ret = perf_event_read_one(event, read_format, buf);
return ret;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
unsigned int events = POLL_HUP;
/*
* Race between perf_event_set_output() and perf_poll(): perf_poll()
* grabs the rb reference but perf_event_set_output() overrides it.
* Here is the timeline for two threads T1, T2:
* t0: T1, rb = rcu_dereference(event->rb)
* t1: T2, old_rb = event->rb
* t2: T2, event->rb = new rb
* t3: T2, ring_buffer_detach(old_rb)
* t4: T1, ring_buffer_attach(rb1)
* t5: T1, poll_wait(event->waitq)
*
* To avoid this problem, we grab mmap_mutex in perf_poll()
* thereby ensuring that the assignment of the new ring buffer
* and the detachment of the old buffer appear atomic to perf_poll()
*/
mutex_lock(&event->mmap_mutex);
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
}
rcu_read_unlock();
mutex_unlock(&event->mmap_mutex);
poll_wait(file, &event->waitq, wait);
return events;
}
static void perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
/*
* Holding the top-level event's child_mutex means that any
* descendant process that has inherited this event will block
* in sync_child_event if it goes to exit, thus satisfying the
* task existence requirements of perf_event_enable/disable.
*/
static void perf_event_for_each_child(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
func(child);
mutex_unlock(&event->child_mutex);
}
static void perf_event_for_each(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(sibling, func);
mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
struct perf_event_context *ctx = event->ctx;
int ret = 0;
u64 value;
if (!is_sampling_event(event))
return -EINVAL;
if (copy_from_user(&value, arg, sizeof(value)))
return -EFAULT;
if (!value)
return -EINVAL;
raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
goto unlock;
}
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
event->hw.sample_period = value;
}
unlock:
raw_spin_unlock_irq(&ctx->lock);
return ret;
}
static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
{
struct fd f = fdget(fd);
if (!f.file)
return -EBADF;
if (f.file->f_op != &perf_fops) {
fdput(f);
return -EBADF;
}
*p = f;
return 0;
}
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
func = perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
func = perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
func = perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
return perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
if (arg != -1) {
struct perf_event *output_event;
struct fd output;
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
output_event = output.file->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
ret = perf_event_set_output(event, NULL);
}
return ret;
}
case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
default:
return -ENOTTY;
}
if (flags & PERF_IOC_FLAG_GROUP)
perf_event_for_each(event, func);
else
perf_event_for_each_child(event, func);
return 0;
}
int perf_event_task_enable(void)
{
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_enable);
mutex_unlock(¤t->perf_event_mutex);
return 0;
}
int perf_event_task_disable(void)
{
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_disable);
mutex_unlock(¤t->perf_event_mutex);
return 0;
}
static int perf_event_index(struct perf_event *event)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
if (event->state != PERF_EVENT_STATE_ACTIVE)
return 0;
return event->pmu->event_idx(event);
}
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{
u64 ctx_time;
*now = perf_clock();
ctx_time = event->shadow_ctx_time + *now;
*enabled = ctx_time - event->tstamp_enabled;
*running = ctx_time - event->tstamp_running;
}
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
* code calls this from NMI context.
*/
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
* we cannot simply called update_context_time()
* because of locking issue as we can be called in
* NMI context
*/
calc_timer_values(event, &now, &enabled, &running);
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
*/
preempt_disable();
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
if (userpg->index)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
arch_perf_update_userpage(userpg, now);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
if (vmf->pgoff == 0)
ret = 0;
return ret;
}
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
get_page(vmf->page);
vmf->page->mapping = vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
ret = 0;
unlock:
rcu_read_unlock();
return ret;
}
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
{
unsigned long flags;
if (!list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
if (!list_empty(&event->rb_entry))
goto unlock;
list_add(&event->rb_entry, &rb->event_list);
unlock:
spin_unlock_irqrestore(&rb->event_lock, flags);
}
static void ring_buffer_detach(struct perf_event *event,
struct ring_buffer *rb)
{
unsigned long flags;
if (list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
list_del_init(&event->rb_entry);
wake_up_all(&event->waitq);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
static void ring_buffer_wakeup(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
unlock:
rcu_read_unlock();
}
static void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
rb = container_of(rcu_head, struct ring_buffer, rcu_head);
rb_free(rb);
}
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
if (!atomic_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
return rb;
}
static void ring_buffer_put(struct ring_buffer *rb)
{
struct perf_event *event, *n;
unsigned long flags;
if (!atomic_dec_and_test(&rb->refcount))
return;
spin_lock_irqsave(&rb->event_lock, flags);
list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
list_del_init(&event->rb_entry);
wake_up_all(&event->waitq);
}
spin_unlock_irqrestore(&rb->event_lock, flags);
call_rcu(&rb->rcu_head, rb_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
}
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
unsigned long size = perf_data_size(event->rb);
struct user_struct *user = event->mmap_user;
struct ring_buffer *rb = event->rb;
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
ring_buffer_put(rb);
free_uid(user);
}
}
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
.close = perf_mmap_close,
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
struct ring_buffer *rb;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra, extra;
int ret = 0, flags = 0;
/*
* Don't allow mmap() of inherited per-task counters. This would
* create a performance issue due to all children writing to the
* same rb.
*/
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_size = vma->vm_end - vma->vm_start;
nr_pages = (vma_size / PAGE_SIZE) - 1;
/*
* If we have rb pages ensure they're a power-of-two number, so we
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
return -EINVAL;
if (vma_size != PAGE_SIZE * (1 + nr_pages))
return -EINVAL;
if (vma->vm_pgoff != 0)
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->mmap_mutex);
if (event->rb) {
if (event->rb->nr_pages == nr_pages)
atomic_inc(&event->rb->refcount);
else
ret = -EINVAL;
goto unlock;
}
user_extra = nr_pages + 1;
user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
/*
* Increase the limit linearly with more CPUs:
*/
user_lock_limit *= num_online_cpus();
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
extra = 0;
if (user_locked > user_lock_limit)
extra = user_locked - user_lock_limit;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
locked = vma->vm_mm->pinned_vm + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
}
WARN_ON(event->rb);
if (vma->vm_flags & VM_WRITE)
flags |= RING_BUFFER_WRITABLE;
rb = rb_alloc(nr_pages,
event->attr.watermark ? event->attr.wakeup_watermark : 0,
event->cpu, flags);
if (!rb) {
ret = -ENOMEM;
goto unlock;
}
rcu_assign_pointer(event->rb, rb);
atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra;
event->mmap_user = get_current_user();
vma->vm_mm->pinned_vm += event->mmap_locked;
perf_event_update_userpage(event);
unlock:
if (!ret)
atomic_inc(&event->mmap_count);
mutex_unlock(&event->mmap_mutex);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
return ret;
}
static int perf_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = file_inode(filp);
struct perf_event *event = filp->private_data;
int retval;
mutex_lock(&inode->i_mutex);
retval = fasync_helper(fd, filp, on, &event->fasync);
mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
return 0;
}
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.mmap = perf_mmap,
.fasync = perf_fasync,
};
/*
* Perf event wakeup
*
* If there's data, ensure we set the poll() state and publish everything
* to user-space before waking everybody up.
*/
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
if (event->pending_disable) {
event->pending_disable = 0;
__perf_event_disable(event);
}
if (event->pending_wakeup) {
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
}
/*
* We assume there is only KVM supporting the callbacks.
* Later on, we might change it to a list if there is
* another virtualization implementation supporting the callbacks.
*/
struct perf_guest_info_callbacks *perf_guest_cbs;
int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = cbs;
return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
static void
perf_output_sample_regs(struct perf_output_handle *handle,
struct pt_regs *regs, u64 mask)
{
int bit;
for_each_set_bit(bit, (const unsigned long *) &mask,
sizeof(mask) * BITS_PER_BYTE) {
u64 val;
val = perf_reg_value(regs, bit);
perf_output_put(handle, val);
}
}
static void perf_sample_regs_user(struct perf_regs_user *regs_user,
struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (current->mm)
regs = task_pt_regs(current);
else
regs = NULL;
}
if (regs) {
regs_user->regs = regs;
regs_user->abi = perf_reg_abi(current);
}
}
/*
* Get remaining task size from user stack pointer.
*
* It'd be better to take stack vma map and limit this more
* precisly, but there's no way to get it safely under interrupt,
* so using TASK_SIZE as limit.
*/
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
unsigned long addr = perf_user_stack_pointer(regs);
if (!addr || addr >= TASK_SIZE)
return 0;
return TASK_SIZE - addr;
}
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
struct pt_regs *regs)
{
u64 task_size;
/* No regs, no stack pointer, no dump. */
if (!regs)
return 0;
/*
* Check if we fit in with the requested stack size into the:
* - TASK_SIZE
* If we don't, we limit the size to the TASK_SIZE.
*
* - remaining sample size
* If we don't, we customize the stack size to
* fit in to the remaining sample size.
*/
task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
stack_size = min(stack_size, (u16) task_size);
/* Current header size plus static size and dynamic size. */
header_size += 2 * sizeof(u64);
/* Do we fit in with the current stack dump size? */
if ((u16) (header_size + stack_size) < header_size) {
/*
* If we overflow the maximum size for the sample,
* we customize the stack dump size to fit in.
*/
stack_size = USHRT_MAX - header_size - sizeof(u64);
stack_size = round_up(stack_size, sizeof(u64));
}
return stack_size;
}
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
struct pt_regs *regs)
{
/* Case of a kernel thread, nothing to dump */
if (!regs) {
u64 size = 0;
perf_output_put(handle, size);
} else {
unsigned long sp;
unsigned int rem;
u64 dyn_size;
/*
* We dump:
* static size
* - the size requested by user or the best one we can fit
* in to the sample max size
* data
* - user stack dump data
* dynamic size
* - the actual dumped size
*/
/* Static size. */
perf_output_put(handle, dump_size);
/* Data. */
sp = perf_user_stack_pointer(regs);
rem = __output_copy_user(handle, (void *) sp, dump_size);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
/* Dynamic size. */
perf_output_put(handle, dyn_size);
}
}
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = event->attr.sample_type;
data->type = sample_type;
header->size += event->id_header_size;
if (sample_type & PERF_SAMPLE_TID) {
/* namespace issues */
data->tid_entry.pid = perf_event_pid(event, current);
data->tid_entry.tid = perf_event_tid(event, current);
}
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_clock();
if (sample_type & PERF_SAMPLE_ID)
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
data->stream_id = event->id;
if (sample_type & PERF_SAMPLE_CPU) {
data->cpu_entry.cpu = raw_smp_processor_id();
data->cpu_entry.reserved = 0;
}
}
void perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
if (event->attr.sample_id_all)
__perf_event_header__init_id(header, data, event);
}
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
struct perf_sample_data *data)
{
u64 sample_type = data->type;
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
}
void perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample)
{
if (event->attr.sample_id_all)
__perf_event__output_id_sample(handle, sample);
}
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
u64 read_format = event->attr.read_format;
u64 values[4];
int n = 0;
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] = running +
atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
__output_copy(handle, values, n * sizeof(u64));
}
/*
* XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
*/
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
u64 values[5];
int n = 0;
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (leader != event)
leader->pmu->read(leader);
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
__output_copy(handle, values, n * sizeof(u64));
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
if (sub != event)
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
__output_copy(handle, values, n * sizeof(u64));
}
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
u64 enabled = 0, running = 0, now;
u64 read_format = event->attr.read_format;
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
* we cannot simply called update_context_time()
* because of locking issue as we are called in
* NMI context
*/
if (read_format & PERF_FORMAT_TOTAL_TIMES)
calc_timer_values(event, &now, &enabled, &running);
if (event->attr.read_format & PERF_FORMAT_GROUP)
perf_output_read_group(handle, event, enabled, running);
else
perf_output_read_one(handle, event, enabled, running);
}
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = data->type;
perf_output_put(handle, *header);
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ADDR)
perf_output_put(handle, data->addr);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
if (data->callchain) {
int size = 1;
if (data->callchain)
size += data->callchain->nr;
size *= sizeof(u64);
__output_copy(handle, data->callchain, size);
} else {
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_RAW) {
if (data->raw) {
perf_output_put(handle, data->raw->size);
__output_copy(handle, data->raw->data,
data->raw->size);
} else {
struct {
u32 size;
u32 data;
} raw = {
.size = sizeof(u32),
.data = 0,
};
perf_output_put(handle, raw);
}
}
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
struct ring_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
local_sub(wakeup_events, &rb->events);
local_inc(&rb->wakeup);
}
}
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
size = data->br_stack->nr
* sizeof(struct perf_branch_entry);
perf_output_put(handle, data->br_stack->nr);
perf_output_copy(handle, data->br_stack->entries, size);
} else {
/*
* we always store at least the value of nr
*/
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
u64 abi = data->regs_user.abi;
/*
* If there are no regs to dump, notice it through
* first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
*/
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_user;
perf_output_sample_regs(handle,
data->regs_user.regs,
mask);
}
}
if (sample_type & PERF_SAMPLE_STACK_USER)
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
}
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
u64 sample_type = event->attr.sample_type;
header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
header->misc |= perf_misc_flags(regs);
__perf_event_header__init_id(header, data, event);
if (sample_type & PERF_SAMPLE_IP)
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
int size = sizeof(u32);
if (data->raw)
size += data->raw->size;
else
size += sizeof(u32);
WARN_ON_ONCE(size & (sizeof(u64)-1));
header->size += size;
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */
if (data->br_stack) {
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);
perf_sample_regs_user(&data->regs_user, regs);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
* Either we need PERF_SAMPLE_STACK_USER bit to be allways
* processed as the last one or have additional check added
* in case new sample type is added, because we could eat
* up the rest of the sample size.
*/
struct perf_regs_user *uregs = &data->regs_user;
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);
if (!uregs->abi)
perf_sample_regs_user(uregs, regs);
stack_size = perf_sample_ustack_size(stack_size, header->size,
uregs->regs);
/*
* If there is something to dump, add space for the dump
* itself and for the field that tells the dynamic size,
* which is how many have been actually dumped.
*/
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
header->size += size;
}
}
static void perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_output_handle handle;
struct perf_event_header header;
/* protect the callchain buffers */
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
if (perf_output_begin(&handle, event, header.size))
goto exit;
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
exit:
rcu_read_unlock();
}
/*
* read event_id
*/
struct perf_read_event {
struct perf_event_header header;
u32 pid;
u32 tid;
};
static void
perf_event_read_event(struct perf_event *event,
struct task_struct *task)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct perf_read_event read_event = {
.header = {
.type = PERF_RECORD_READ,
.misc = 0,
.size = sizeof(read_event) + event->read_size,
},
.pid = perf_event_pid(event, task),
.tid = perf_event_tid(event, task),
};
int ret;
perf_event_header__init_id(&read_event.header, &sample, event);
ret = perf_output_begin(&handle, event, read_event.header.size);
if (ret)
return;
perf_output_put(&handle, read_event);
perf_output_read(&handle, event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
/*
* task tracking -- fork/exit
*
* enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
*/
struct perf_task_event {
struct task_struct *task;
struct perf_event_context *task_ctx;
struct {
struct perf_event_header header;
u32 pid;
u32 ppid;
u32 tid;
u32 ptid;
u64 time;
} event_id;
};
static void perf_event_task_output(struct perf_event *event,
struct perf_task_event *task_event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct task_struct *task = task_event->task;
int ret, size = task_event->event_id.header.size;
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
task_event->event_id.header.size);
if (ret)
goto out;
task_event->event_id.pid = perf_event_pid(event, task);
task_event->event_id.ppid = perf_event_pid(event, current);
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
perf_output_put(&handle, task_event->event_id);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
task_event->event_id.header.size = size;
}
static int perf_event_task_match(struct perf_event *event)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (!event_filter_match(event))
return 0;
if (event->attr.comm || event->attr.mmap ||
event->attr.mmap_data || event->attr.task)
return 1;
return 0;
}
static void perf_event_task_ctx(struct perf_event_context *ctx,
struct perf_task_event *task_event)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_task_match(event))
perf_event_task_output(event, task_event);
}
}
static void perf_event_task_event(struct perf_task_event *task_event)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
struct pmu *pmu;
int ctxn;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_task_ctx(&cpuctx->ctx, task_event);
ctx = task_event->task_ctx;
if (!ctx) {
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
perf_event_task_ctx(ctx, task_event);
}
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
if (task_event->task_ctx)
perf_event_task_ctx(task_event->task_ctx, task_event);
rcu_read_unlock();
}
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
{
struct perf_task_event task_event;
if (!atomic_read(&nr_comm_events) &&
!atomic_read(&nr_mmap_events) &&
!atomic_read(&nr_task_events))
return;
task_event = (struct perf_task_event){
.task = task,
.task_ctx = task_ctx,
.event_id = {
.header = {
.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
.misc = 0,
.size = sizeof(task_event.event_id),
},
/* .pid */
/* .ppid */
/* .tid */
/* .ptid */
.time = perf_clock(),
},
};
perf_event_task_event(&task_event);
}
void perf_event_fork(struct task_struct *task)
{
perf_event_task(task, NULL, 1);
}
/*
* comm tracking
*/
struct perf_comm_event {
struct task_struct *task;
char *comm;
int comm_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
} event_id;
};
static void perf_event_comm_output(struct perf_event *event,
struct perf_comm_event *comm_event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = comm_event->event_id.header.size;
int ret;
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size);
if (ret)
goto out;
comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
perf_output_put(&handle, comm_event->event_id);
__output_copy(&handle, comm_event->comm,
comm_event->comm_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
comm_event->event_id.header.size = size;
}
static int perf_event_comm_match(struct perf_event *event)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (!event_filter_match(event))
return 0;
if (event->attr.comm)
return 1;
return 0;
}
static void perf_event_comm_ctx(struct perf_event_context *ctx,
struct perf_comm_event *comm_event)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_comm_match(event))
perf_event_comm_output(event, comm_event);
}
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
char comm[TASK_COMM_LEN];
unsigned int size;
struct pmu *pmu;
int ctxn;
memset(comm, 0, sizeof(comm));
strlcpy(comm, comm_event->task->comm, sizeof(comm));
size = ALIGN(strlen(comm)+1, sizeof(u64));
comm_event->comm = comm;
comm_event->comm_size = size;
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
perf_event_comm_ctx(ctx, comm_event);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
rcu_read_unlock();
}
void perf_event_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
struct perf_event_context *ctx;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
perf_event_enable_on_exec(ctx);
}
if (!atomic_read(&nr_comm_events))
return;
comm_event = (struct perf_comm_event){
.task = task,
/* .comm */
/* .comm_size */
.event_id = {
.header = {
.type = PERF_RECORD_COMM,
.misc = 0,
/* .size */
},
/* .pid */
/* .tid */
},
};
perf_event_comm_event(&comm_event);
}
/*
* mmap tracking
*/
struct perf_mmap_event {
struct vm_area_struct *vma;
const char *file_name;
int file_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
u64 start;
u64 len;
u64 pgoff;
} event_id;
};
static void perf_event_mmap_output(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
int ret;
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
if (ret)
goto out;
mmap_event->event_id.pid = perf_event_pid(event, current);
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
}
static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event,
int executable)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (!event_filter_match(event))
return 0;
if ((!executable && event->attr.mmap_data) ||
(executable && event->attr.mmap))
return 1;
return 0;
}
static void perf_event_mmap_ctx(struct perf_event_context *ctx,
struct perf_mmap_event *mmap_event,
int executable)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_mmap_match(event, mmap_event, executable))
perf_event_mmap_output(event, mmap_event);
}
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
unsigned int size;
char tmp[16];
char *buf = NULL;
const char *name;
struct pmu *pmu;
int ctxn;
memset(tmp, 0, sizeof(tmp));
if (file) {
/*
* d_path works from the end of the rb backwards, so we
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
if (!buf) {
name = strncpy(tmp, "//enomem", sizeof(tmp));
goto got_name;
}
name = d_path(&file->f_path, buf, PATH_MAX);
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
}
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
sizeof(tmp) - 1);
tmp[sizeof(tmp) - 1] = '\0';
goto got_name;
}
if (!vma->vm_mm) {
name = strncpy(tmp, "[vdso]", sizeof(tmp));
goto got_name;
} else if (vma->vm_start <= vma->vm_mm->start_brk &&
vma->vm_end >= vma->vm_mm->brk) {
name = strncpy(tmp, "[heap]", sizeof(tmp));
goto got_name;
} else if (vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) {
name = strncpy(tmp, "[stack]", sizeof(tmp));
goto got_name;
}
name = strncpy(tmp, "//anon", sizeof(tmp));
goto got_name;
}
got_name:
size = ALIGN(strlen(name)+1, sizeof(u64));
mmap_event->file_name = name;
mmap_event->file_size = size;
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
vma->vm_flags & VM_EXEC);
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx) {
perf_event_mmap_ctx(ctx, mmap_event,
vma->vm_flags & VM_EXEC);
}
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
rcu_read_unlock();
kfree(buf);
}
void perf_event_mmap(struct vm_area_struct *vma)
{
struct perf_mmap_event mmap_event;
if (!atomic_read(&nr_mmap_events))
return;
mmap_event = (struct perf_mmap_event){
.vma = vma,
/* .file_name */
/* .file_size */
.event_id = {
.header = {
.type = PERF_RECORD_MMAP,
.misc = PERF_RECORD_MISC_USER,
/* .size */
},
/* .pid */
/* .tid */
.start = vma->vm_start,
.len = vma->vm_end - vma->vm_start,
.pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
};
perf_event_mmap_event(&mmap_event);
}
/*
* IRQ throttle logging
*/
static void perf_log_throttle(struct perf_event *event, int enable)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int ret;
struct {
struct perf_event_header header;
u64 time;
u64 id;
u64 stream_id;
} throttle_event = {
.header = {
.type = PERF_RECORD_THROTTLE,
.misc = 0,
.size = sizeof(throttle_event),
},
.time = perf_clock(),
.id = primary_event_id(event),
.stream_id = event->id,
};
if (enable)
throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
perf_event_header__init_id(&throttle_event.header, &sample, event);
ret = perf_output_begin(&handle, event,
throttle_event.header.size);
if (ret)
return;
perf_output_put(&handle, throttle_event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
/*
* Generic event overflow handling, sampling.
*/
static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
u64 seq;
int ret = 0;
/*
* Non-sampling counters might still use the PMI to fold short
* hardware counters, ignore those.
*/
if (unlikely(!is_sampling_event(event)))
return 0;
seq = __this_cpu_read(perf_throttled_seq);
if (seq != hwc->interrupts_seq) {
hwc->interrupts_seq = seq;
hwc->interrupts = 1;
} else {
hwc->interrupts++;
if (unlikely(throttle
&& hwc->interrupts >= max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
}
if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_time_stamp;
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
perf_adjust_period(event, delta, hwc->last_period, true);
}
/*
* XXX event_limit might not quite work as expected on inherited
* events
*/
event->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
event->pending_disable = 1;
irq_work_queue(&event->pending);
}
if (event->overflow_handler)
event->overflow_handler(event, data, regs);
else
perf_event_output(event, data, regs);
if (event->fasync && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
return ret;
}
int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
return __perf_event_overflow(event, 1, data, regs);
}
/*
* Generic software event infrastructure
*/
struct swevent_htable {
struct swevent_hlist *swevent_hlist;
struct mutex hlist_mutex;
int hlist_refcount;
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
* We directly increment event->count and keep a second value in
* event->hw.period_left to count intervals. This period event
* is kept in the range [-sample_period, 0] so that we can use the
* sign as trigger.
*/
static u64 perf_swevent_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 period = hwc->last_period;
u64 nr, offset;
s64 old, val;
hwc->last_period = hwc->sample_period;
again:
old = val = local64_read(&hwc->period_left);
if (val < 0)
return 0;
nr = div64_u64(period + val, period);
offset = nr * period;
val -= offset;
if (local64_cmpxchg(&hwc->period_left, old, val) != old)
goto again;
return nr;
}
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
if (!overflow)
overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
for (; overflow; overflow--) {
if (__perf_event_overflow(event, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
* hwc->interrupts == MAX_INTERRUPTS.
*/
break;
}
throttle = 1;
}
}
static void perf_swevent_event(struct perf_event *event, u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
local64_add(nr, &event->count);
if (!regs)
return;
if (!is_sampling_event(event))
return;
if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
data->period = nr;
return perf_swevent_overflow(event, 1, data, regs);
} else
data->period = event->hw.last_period;
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, data, regs);
if (local64_add_negative(nr, &hwc->period_left))
return;
perf_swevent_overflow(event, 0, data, regs);
}
static int perf_exclude_event(struct perf_event *event,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 1;
if (regs) {
if (event->attr.exclude_user && user_mode(regs))
return 1;
if (event->attr.exclude_kernel && !user_mode(regs))
return 1;
}
return 0;
}
static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
u32 event_id,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->attr.type != type)
return 0;
if (event->attr.config != event_id)
return 0;
if (perf_exclude_event(event, regs))
return 0;
return 1;
}
static inline u64 swevent_hash(u64 type, u32 event_id)
{
u64 val = event_id | (type << 32);
return hash_64(val, SWEVENT_HLIST_BITS);
}
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
u64 hash = swevent_hash(type, event_id);
return &hlist->heads[hash];
}
/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
struct swevent_hlist *hlist;
hlist = rcu_dereference(swhash->swevent_hlist);
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
struct swevent_hlist *hlist;
u32 event_id = event->attr.config;
u64 type = event->attr.type;
/*
* Event scheduling is always serialized against hlist allocation
* and release. Which makes the protected version suitable here.
* The context lock guarantees that.
*/
hlist = rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&event->ctx->lock));
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct perf_event *event;
struct hlist_head *head;
rcu_read_lock();
head = find_swevent_head_rcu(swhash, type, event_id);
if (!head)
goto end;
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, data, regs);
}
end:
rcu_read_unlock();
}
int perf_swevent_get_recursion_context(void)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
inline void perf_swevent_put_recursion_context(int rctx)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
put_recursion_context(swhash->recursion, rctx);
}
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data;
int rctx;
preempt_disable_notrace();
rctx = perf_swevent_get_recursion_context();
if (rctx < 0)
return;
perf_sample_data_init(&data, addr, 0);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
perf_swevent_put_recursion_context(rctx);
preempt_enable_notrace();
}
static void perf_swevent_read(struct perf_event *event)
{
}
static int perf_swevent_add(struct perf_event *event, int flags)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct hw_perf_event *hwc = &event->hw;
struct hlist_head *head;
if (is_sampling_event(event)) {
hwc->last_period = hwc->sample_period;
perf_swevent_set_period(event);
}
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
if (WARN_ON_ONCE(!head))
return -EINVAL;
hlist_add_head_rcu(&event->hlist_entry, head);
return 0;
}
static void perf_swevent_del(struct perf_event *event, int flags)
{
hlist_del_rcu(&event->hlist_entry);
}
static void perf_swevent_start(struct perf_event *event, int flags)
{
event->hw.state = 0;
}
static void perf_swevent_stop(struct perf_event *event, int flags)
{
event->hw.state = PERF_HES_STOPPED;
}
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
return rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&swhash->hlist_mutex));
}
static void swevent_hlist_release(struct swevent_htable *swhash)
{
struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
if (!hlist)
return;
rcu_assign_pointer(swhash->swevent_hlist, NULL);
kfree_rcu(hlist, rcu_head);
}
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
if (!--swhash->hlist_refcount)
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
}
static void swevent_hlist_put(struct perf_event *event)
{
int cpu;
if (event->cpu != -1) {
swevent_hlist_put_cpu(event, event->cpu);
return;
}
for_each_possible_cpu(cpu)
swevent_hlist_put_cpu(event, cpu);
}
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
int err = 0;
mutex_lock(&swhash->hlist_mutex);
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
if (!hlist) {
err = -ENOMEM;
goto exit;
}
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
swhash->hlist_refcount++;
exit:
mutex_unlock(&swhash->hlist_mutex);
return err;
}
static int swevent_hlist_get(struct perf_event *event)
{
int err;
int cpu, failed_cpu;
if (event->cpu != -1)
return swevent_hlist_get_cpu(event, event->cpu);
get_online_cpus();
for_each_possible_cpu(cpu) {
err = swevent_hlist_get_cpu(event, cpu);
if (err) {
failed_cpu = cpu;
goto fail;
}
}
put_online_cpus();
return 0;
fail:
for_each_possible_cpu(cpu) {
if (cpu == failed_cpu)
break;
swevent_hlist_put_cpu(event, cpu);
}
put_online_cpus();
return err;
}
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
static void sw_perf_event_destroy(struct perf_event *event)
{
u64 event_id = event->attr.config;
WARN_ON(event->parent);
static_key_slow_dec(&perf_swevent_enabled[event_id]);
swevent_hlist_put(event);
}
static int perf_swevent_init(struct perf_event *event)
{
u64 event_id = event->attr.config;
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event_id) {
case PERF_COUNT_SW_CPU_CLOCK:
case PERF_COUNT_SW_TASK_CLOCK:
return -ENOENT;
default:
break;
}
if (event_id >= PERF_COUNT_SW_MAX)
return -ENOENT;
if (!event->parent) {
int err;
err = swevent_hlist_get(event);
if (err)
return err;
static_key_slow_inc(&perf_swevent_enabled[event_id]);
event->destroy = sw_perf_event_destroy;
}
return 0;
}
static int perf_swevent_event_idx(struct perf_event *event)
{
return 0;
}
static struct pmu perf_swevent = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_swevent_init,
.add = perf_swevent_add,
.del = perf_swevent_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
.event_idx = perf_swevent_event_idx,
};
#ifdef CONFIG_EVENT_TRACING
static int perf_tp_filter_match(struct perf_event *event,
struct perf_sample_data *data)
{
void *record = data->raw->data;
if (likely(!event->filter) || filter_match_preds(event->filter, record))
return 1;
return 0;
}
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
/*
* All tracepoints are from kernel-space.
*/
if (event->attr.exclude_kernel)
return 0;
if (!perf_tp_filter_match(event, data))
return 0;
return 1;
}
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = {
.size = entry_size,
.data = record,
};
perf_sample_data_init(&data, addr, 0);
data.raw = &raw;
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
/*
* If we got specified a target task, also iterate its context and
* deliver this event there too.
*/
if (task && task != current) {
struct perf_event_context *ctx;
struct trace_entry *entry = record;
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
if (!ctx)
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
continue;
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
unlock:
rcu_read_unlock();
}
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
static void tp_perf_event_destroy(struct perf_event *event)
{
perf_trace_destroy(event);
}
static int perf_tp_event_init(struct perf_event *event)
{
int err;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -ENOENT;
/*
* no branch sampling for tracepoint events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
err = perf_trace_init(event);
if (err)
return err;
event->destroy = tp_perf_event_destroy;
return 0;
}
static struct pmu perf_tracepoint = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_tp_event_init,
.add = perf_trace_add,
.del = perf_trace_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
.event_idx = perf_swevent_event_idx,
};
static inline void perf_tp_register(void)
{
perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
char *filter_str;
int ret;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -EINVAL;
filter_str = strndup_user(arg, PAGE_SIZE);
if (IS_ERR(filter_str))
return PTR_ERR(filter_str);
ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
kfree(filter_str);
return ret;
}
static void perf_event_free_filter(struct perf_event *event)
{
ftrace_profile_free_filter(event);
}
#else
static inline void perf_tp_register(void)
{
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
return -ENOENT;
}
static void perf_event_free_filter(struct perf_event *event)
{
}
#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, &sample, regs);
}
#endif
/*
* hrtimer based swevent callback
*/
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
struct pt_regs *regs;
struct perf_event *event;
u64 period;
event = container_of(hrtimer, struct perf_event, hw.hrtimer);
if (event->state != PERF_EVENT_STATE_ACTIVE)
return HRTIMER_NORESTART;
event->pmu->read(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && is_idle_task(current)))
if (__perf_event_overflow(event, 1, &data, regs))
ret = HRTIMER_NORESTART;
}
period = max_t(u64, 10000, event->hw.sample_period);
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 period;
if (!is_sampling_event(event))
return;
period = local64_read(&hwc->period_left);
if (period) {
if (period < 0)
period = 10000;
local64_set(&hwc->period_left, 0);
} else {
period = max_t(u64, 10000, hwc->sample_period);
}
__hrtimer_start_range_ns(&hwc->hrtimer,
ns_to_ktime(period), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (is_sampling_event(event)) {
ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
local64_set(&hwc->period_left, ktime_to_ns(remaining));
hrtimer_cancel(&hwc->hrtimer);
}
}
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (!is_sampling_event(event))
return;
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
* mapping and avoid the whole period adjust feedback stuff.
*/
if (event->attr.freq) {
long freq = event->attr.sample_freq;
event->attr.sample_period = NSEC_PER_SEC / freq;
hwc->sample_period = event->attr.sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
hwc->last_period = hwc->sample_period;
event->attr.freq = 0;
}
}
/*
* Software event: cpu wall time clock
*/
static void cpu_clock_event_update(struct perf_event *event)
{
s64 prev;
u64 now;
now = local_clock();
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
static void cpu_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, local_clock());
perf_swevent_start_hrtimer(event);
}
static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
cpu_clock_event_update(event);
}
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
cpu_clock_event_start(event, flags);
return 0;
}
static void cpu_clock_event_del(struct perf_event *event, int flags)
{
cpu_clock_event_stop(event, flags);
}
static void cpu_clock_event_read(struct perf_event *event)
{
cpu_clock_event_update(event);
}
static int cpu_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
static struct pmu perf_cpu_clock = {
.task_ctx_nr = perf_sw_context,
.event_init = cpu_clock_event_init,
.add = cpu_clock_event_add,
.del = cpu_clock_event_del,
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
.event_idx = perf_swevent_event_idx,
};
/*
* Software event: task time clock
*/
static void task_clock_event_update(struct perf_event *event, u64 now)
{
u64 prev;
s64 delta;
prev = local64_xchg(&event->hw.prev_count, now);
delta = now - prev;
local64_add(delta, &event->count);
}
static void task_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, event->ctx->time);
perf_swevent_start_hrtimer(event);
}
static void task_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
task_clock_event_update(event, event->ctx->time);
}
static int task_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
task_clock_event_start(event, flags);
return 0;
}
static void task_clock_event_del(struct perf_event *event, int flags)
{
task_clock_event_stop(event, PERF_EF_UPDATE);
}
static void task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
u64 delta = now - event->ctx->timestamp;
u64 time = event->ctx->time + delta;
task_clock_event_update(event, time);
}
static int task_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
static struct pmu perf_task_clock = {
.task_ctx_nr = perf_sw_context,
.event_init = task_clock_event_init,
.add = task_clock_event_add,
.del = task_clock_event_del,
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
.event_idx = perf_swevent_event_idx,
};
static void perf_pmu_nop_void(struct pmu *pmu)
{
}
static int perf_pmu_nop_int(struct pmu *pmu)
{
return 0;
}
static void perf_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
}
static int perf_pmu_commit_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
return 0;
}
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
}
static int perf_event_idx_default(struct perf_event *event)
{
return event->hw.idx + 1;
}
/*
* Ensures all contexts with the same task_ctx_nr have the same
* pmu_cpu_context too.
*/
static void *find_pmu_context(int ctxn)
{
struct pmu *pmu;
if (ctxn < 0)
return NULL;
list_for_each_entry(pmu, &pmus, entry) {
if (pmu->task_ctx_nr == ctxn)
return pmu->pmu_cpu_context;
}
return NULL;
}
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
int cpu;
for_each_possible_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
if (cpuctx->unique_pmu == old_pmu)
cpuctx->unique_pmu = pmu;
}
}
static void free_pmu_context(struct pmu *pmu)
{
struct pmu *i;
mutex_lock(&pmus_lock);
/*
* Like a real lame refcount.
*/
list_for_each_entry(i, &pmus, entry) {
if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
update_pmu_context(i, pmu);
goto out;
}
}
free_percpu(pmu->pmu_cpu_context);
out:
mutex_unlock(&pmus_lock);
}
static struct idr pmu_idr;
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
static struct device_attribute pmu_dev_attrs[] = {
__ATTR_RO(type),
__ATTR_NULL,
};
static int pmu_bus_running;
static struct bus_type pmu_bus = {
.name = "event_source",
.dev_attrs = pmu_dev_attrs,
};
static void pmu_dev_release(struct device *dev)
{
kfree(dev);
}
static int pmu_dev_alloc(struct pmu *pmu)
{
int ret = -ENOMEM;
pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!pmu->dev)
goto out;
pmu->dev->groups = pmu->attr_groups;
device_initialize(pmu->dev);
ret = dev_set_name(pmu->dev, "%s", pmu->name);
if (ret)
goto free_dev;
dev_set_drvdata(pmu->dev, pmu);
pmu->dev->bus = &pmu_bus;
pmu->dev->release = pmu_dev_release;
ret = device_add(pmu->dev);
if (ret)
goto free_dev;
out:
return ret;
free_dev:
put_device(pmu->dev);
goto out;
}
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
int cpu, ret;
mutex_lock(&pmus_lock);
ret = -ENOMEM;
pmu->pmu_disable_count = alloc_percpu(int);
if (!pmu->pmu_disable_count)
goto unlock;
pmu->type = -1;
if (!name)
goto skip_type;
pmu->name = name;
if (type < 0) {
type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
if (type < 0) {
ret = type;
goto free_pdc;
}
}
pmu->type = type;
if (pmu_bus_running) {
ret = pmu_dev_alloc(pmu);
if (ret)
goto free_idr;
}
skip_type:
pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
if (pmu->pmu_cpu_context)
goto got_cpu_context;
ret = -ENOMEM;
pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
if (!pmu->pmu_cpu_context)
goto free_dev;
for_each_possible_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
cpuctx->jiffies_interval = 1;
INIT_LIST_HEAD(&cpuctx->rotation_list);
cpuctx->unique_pmu = pmu;
}
got_cpu_context:
if (!pmu->start_txn) {
if (pmu->pmu_enable) {
/*
* If we have pmu_enable/pmu_disable calls, install
* transaction stubs that use that to try and batch
* hardware accesses.
*/
pmu->start_txn = perf_pmu_start_txn;
pmu->commit_txn = perf_pmu_commit_txn;
pmu->cancel_txn = perf_pmu_cancel_txn;
} else {
pmu->start_txn = perf_pmu_nop_void;
pmu->commit_txn = perf_pmu_nop_int;
pmu->cancel_txn = perf_pmu_nop_void;
}
}
if (!pmu->pmu_enable) {
pmu->pmu_enable = perf_pmu_nop_void;
pmu->pmu_disable = perf_pmu_nop_void;
}
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
list_add_rcu(&pmu->entry, &pmus);
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
free_dev:
device_del(pmu->dev);
put_device(pmu->dev);
free_idr:
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
free_pdc:
free_percpu(pmu->pmu_disable_count);
goto unlock;
}
void perf_pmu_unregister(struct pmu *pmu)
{
mutex_lock(&pmus_lock);
list_del_rcu(&pmu->entry);
mutex_unlock(&pmus_lock);
/*
* We dereference the pmu list under both SRCU and regular RCU, so
* synchronize against both of those.
*/
synchronize_srcu(&pmus_srcu);
synchronize_rcu();
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
device_del(pmu->dev);
put_device(pmu->dev);
free_pmu_context(pmu);
}
struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
int idx;
int ret;
idx = srcu_read_lock(&pmus_srcu);
rcu_read_lock();
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
event->pmu = pmu;
ret = pmu->event_init(event);
if (ret)
pmu = ERR_PTR(ret);
goto unlock;
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
event->pmu = pmu;
ret = pmu->event_init(event);
if (!ret)
goto unlock;
if (ret != -ENOENT) {
pmu = ERR_PTR(ret);
goto unlock;
}
}
pmu = ERR_PTR(-ENOENT);
unlock:
srcu_read_unlock(&pmus_srcu, idx);
return pmu;
}
/*
* Allocate and initialize a event structure
*/
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
long err;
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
/*
* Single events are their own group leaders, with an
* empty sibling list:
*/
if (!group_leader)
group_leader = event;
mutex_init(&event->child_mutex);
INIT_LIST_HEAD(&event->child_list);
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
atomic_long_set(&event->refcount, 1);
event->cpu = cpu;
event->attr = *attr;
event->group_leader = group_leader;
event->pmu = NULL;
event->oncpu = -1;
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
event->id = atomic64_inc_return(&perf_event_id);
event->state = PERF_EVENT_STATE_INACTIVE;
if (task) {
event->attach_state = PERF_ATTACH_TASK;
if (attr->type == PERF_TYPE_TRACEPOINT)
event->hw.tp_target = task;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* hw_breakpoint is a bit difficult here..
*/
else if (attr->type == PERF_TYPE_BREAKPOINT)
event->hw.bp_target = task;
#endif
}
if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
context = parent_event->overflow_handler_context;
}
event->overflow_handler = overflow_handler;
event->overflow_handler_context = context;
perf_event__state_init(event);
pmu = NULL;
hwc = &event->hw;
hwc->sample_period = attr->sample_period;
if (attr->freq && attr->sample_freq)
hwc->sample_period = 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
/*
* we currently do not support PERF_FORMAT_GROUP on inherited events
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto done;
pmu = perf_init_event(event);
done:
err = 0;
if (!pmu)
err = -EINVAL;
else if (IS_ERR(pmu))
err = PTR_ERR(pmu);
if (err) {
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
return ERR_PTR(err);
}
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_inc(&perf_sched_events.key);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers();
if (err) {
free_event(event);
return ERR_PTR(err);
}
}
if (has_branch_stack(event)) {
static_key_slow_inc(&perf_sched_events.key);
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_inc(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
return event;
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr)
{
u32 size;
int ret;
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
return -EFAULT;
/*
* zero the full structure, so that a short copy will be nice.
*/
memset(attr, 0, sizeof(*attr));
ret = get_user(size, &uattr->size);
if (ret)
return ret;
if (size > PAGE_SIZE) /* silly large */
goto err_size;
if (!size) /* abi compat */
size = PERF_ATTR_SIZE_VER0;
if (size < PERF_ATTR_SIZE_VER0)
goto err_size;
/*
* If we're handed a bigger struct than we know of,
* ensure all the unknown bits are 0 - i.e. new
* user-space does not rely on any kernel feature
* extensions we dont know about yet.
*/
if (size > sizeof(*attr)) {
unsigned char __user *addr;
unsigned char __user *end;
unsigned char val;
addr = (void __user *)uattr + sizeof(*attr);
end = (void __user *)uattr + size;
for (; addr < end; addr++) {
ret = get_user(val, addr);
if (ret)
return ret;
if (val)
goto err_size;
}
size = sizeof(*attr);
}
ret = copy_from_user(attr, uattr, size);
if (ret)
return -EFAULT;
if (attr->__reserved_1)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
return -EINVAL;
if (attr->read_format & ~(PERF_FORMAT_MAX-1))
return -EINVAL;
if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
u64 mask = attr->branch_sample_type;
/* only using defined bits */
if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
return -EINVAL;
/* at least one branch bit must be set */
if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
return -EINVAL;
/* kernel level capture: check permissions */
if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
&& perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
/* propagate priv level, when not set for branch */
if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
/* exclude_kernel checked on syscall entry */
if (!attr->exclude_kernel)
mask |= PERF_SAMPLE_BRANCH_KERNEL;
if (!attr->exclude_user)
mask |= PERF_SAMPLE_BRANCH_USER;
if (!attr->exclude_hv)
mask |= PERF_SAMPLE_BRANCH_HV;
/*
* adjust user setting (for HW filter setup)
*/
attr->branch_sample_type = mask;
}
}
if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
ret = perf_reg_validate(attr->sample_regs_user);
if (ret)
return ret;
}
if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
if (!arch_perf_have_user_stack_dump())
return -ENOSYS;
/*
* We have __u32 type for the size, but so far
* we can only use __u16 as maximum due to the
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
ret = -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
ret = -EINVAL;
}
out:
return ret;
err_size:
put_user(sizeof(*attr), &uattr->size);
ret = -E2BIG;
goto out;
}
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
struct ring_buffer *rb = NULL, *old_rb = NULL;
int ret = -EINVAL;
if (!output_event)
goto set;
/* don't allow circular references */
if (event == output_event)
goto out;
/*
* Don't allow cross-cpu buffers
*/
if (output_event->cpu != event->cpu)
goto out;
/*
* If its not a per-cpu rb, it must be the same task.
*/
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
goto out;
set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */
if (atomic_read(&event->mmap_count))
goto unlock;
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
if (!rb)
goto unlock;
}
old_rb = event->rb;
rcu_assign_pointer(event->rb, rb);
if (old_rb)
ring_buffer_detach(event, old_rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
if (old_rb)
ring_buffer_put(old_rb);
out:
return ret;
}
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
* @attr_uptr: event_id type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader event fd
*/
SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
struct pmu *pmu;
int event_fd;
int move_group = 0;
int err;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
}
/*
* In cgroup mode, the pid argument is used to pass the fd
* opened to the cgroup directory in cgroupfs. The cpu argument
* designates the cpu on which to monitor threads from that
* cgroup.
*/
if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
return -EINVAL;
event_fd = get_unused_fd();
if (event_fd < 0)
return event_fd;
if (group_fd != -1) {
err = perf_fget_light(group_fd, &group);
if (err)
goto err_fd;
group_leader = group.file->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
group_leader = NULL;
}
if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
task = find_lively_task_by_vpid(pid);
if (IS_ERR(task)) {
err = PTR_ERR(task);
goto err_group_fd;
}
}
get_online_cpus();
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_task;
}
if (flags & PERF_FLAG_PID_CGROUP) {
err = perf_cgroup_connect(pid, event, &attr, group_leader);
if (err)
goto err_alloc;
/*
* one more event:
* - that has cgroup constraint on event->cpu
* - that may need work on context switch
*/
atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
static_key_slow_inc(&perf_sched_events.key);
}
/*
* Special case software events and allow them to be part of
* any hardware group.
*/
pmu = event->pmu;
if (group_leader &&
(is_software_event(event) != is_software_event(group_leader))) {
if (is_software_event(event)) {
/*
* If event and group_leader are not both a software
* event, and event is, then group leader is not.
*
* Allow the addition of software events to !software
* groups, this is safe because software events never
* fail to schedule.
*/
pmu = group_leader->pmu;
} else if (is_software_event(group_leader) &&
(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
/*
* In case the group is a pure software group, and we
* try to add a hardware event, move the whole group to
* the hardware context.
*/
move_group = 1;
}
}
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pmu, task, event->cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
}
if (task) {
put_task_struct(task);
task = NULL;
}
/*
* Look up the group leader (we will attach this event to it):
*/
if (group_leader) {
err = -EINVAL;
/*
* Do not allow a recursive hierarchy (this new sibling
* becoming part of another group-sibling):
*/
if (group_leader->group_leader != group_leader)
goto err_context;
/*
* Do not allow to attach to a group in a different
* task or CPU context:
*/
if (move_group) {
if (group_leader->ctx->type != ctx->type)
goto err_context;
} else {
if (group_leader->ctx != ctx)
goto err_context;
}
/*
* Only a group leader can be exclusive or pinned
*/
if (attr.exclusive || attr.pinned)
goto err_context;
}
if (output_event) {
err = perf_event_set_output(event, output_event);
if (err)
goto err_context;
}
event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
goto err_context;
}
if (move_group) {
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
perf_remove_from_context(group_leader);
/*
* Removing from the context ends up with disabled
* event. What we want here is event in the initial
* startup state, ready to be add into new context.
*/
perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling);
perf_event__state_init(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
if (move_group) {
synchronize_rcu();
perf_install_in_context(ctx, group_leader, event->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_install_in_context(ctx, sibling, event->cpu);
get_ctx(ctx);
}
}
perf_install_in_context(ctx, event, event->cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
put_online_cpus();
event->owner = current;
mutex_lock(¤t->perf_event_mutex);
list_add_tail(&event->owner_entry, ¤t->perf_event_list);
mutex_unlock(¤t->perf_event_mutex);
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(event);
perf_event__id_header_size(event);
/*
* Drop the reference on the group_event after placing the
* new event on the sibling_list. This ensures destruction
* of the group leader will find the pointer to itself in
* perf_group_detach().
*/
fdput(group);
fd_install(event_fd, event_file);
return event_fd;
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
free_event(event);
err_task:
put_online_cpus();
if (task)
put_task_struct(task);
err_group_fd:
fdput(group);
err_fd:
put_unused_fd(event_fd);
return err;
}
/**
* perf_event_create_kernel_counter
*
* @attr: attributes of the counter to create
* @cpu: cpu in which the counter is bound
* @task: task to profile (NULL for percpu)
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct perf_event_context *ctx;
struct perf_event *event;
int err;
/*
* Get the target context (task or percpu):
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
}
ctx = find_get_context(event->pmu, task, cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
err_free:
free_event(event);
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
struct perf_event_context *src_ctx;
struct perf_event_context *dst_ctx;
struct perf_event *event, *tmp;
LIST_HEAD(events);
src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
mutex_lock(&src_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event);
put_ctx(src_ctx);
list_add(&event->event_entry, &events);
}
mutex_unlock(&src_ctx->mutex);
synchronize_rcu();
mutex_lock(&dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &events, event_entry) {
list_del(&event->event_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat)
perf_event_read_event(child_event, child);
child_val = perf_event_count(child_event);
/*
* Add back the child's count to the parent's count:
*/
atomic64_add(child_val, &parent_event->child_count);
atomic64_add(child_event->total_time_enabled,
&parent_event->child_total_time_enabled);
atomic64_add(child_event->total_time_running,
&parent_event->child_total_time_running);
/*
* Remove this event from the parent's list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_del_init(&child_event->child_list);
mutex_unlock(&parent_event->child_mutex);
/*
* Release the parent event, if this was the last
* reference to it.
*/
put_event(parent_event);
}
static void
__perf_event_exit_task(struct perf_event *child_event,
struct perf_event_context *child_ctx,
struct task_struct *child)
{
if (child_event->parent) {
raw_spin_lock_irq(&child_ctx->lock);
perf_group_detach(child_event);
raw_spin_unlock_irq(&child_ctx->lock);
}
perf_remove_from_context(child_event);
/*
* It can happen that the parent exits first, and has events
* that are still around due to the child reference. These
* events need to be zapped.
*/
if (child_event->parent) {
sync_child_event(child_event, child);
free_event(child_event);
}
}
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event, *tmp;
struct perf_event_context *child_ctx;
unsigned long flags;
if (likely(!child->perf_event_ctxp[ctxn])) {
perf_event_task(child, NULL, 0);
return;
}
local_irq_save(flags);
/*
* We can't reschedule here because interrupts are disabled,
* and either child is current or it is a task that can't be
* scheduled, so we are now safe from rescheduling changing
* our context.
*/
child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
/*
* Take the context lock here so that if find_get_context is
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
raw_spin_lock(&child_ctx->lock);
task_ctx_sched_out(child_ctx);
child->perf_event_ctxp[ctxn] = NULL;
/*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
* the events from it.
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we
* won't get any samples after PERF_RECORD_EXIT. We can however still
* get a few PERF_RECORD_READ events.
*/
perf_event_task(child, child_ctx, 0);
/*
* We can recurse on the same lock type through:
*
* __perf_event_exit_task()
* sync_child_event()
* put_event()
* mutex_lock(&ctx->mutex)
*
* But since its the parent context it won't be the same instance.
*/
mutex_lock(&child_ctx->mutex);
again:
list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
group_entry)
__perf_event_exit_task(child_event, child_ctx, child);
list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
group_entry)
__perf_event_exit_task(child_event, child_ctx, child);
/*
* If the last event was a group event, it will have appended all
* its siblings to the list, but we obtained 'tmp' before that which
* will still point to the list head terminating the iteration.
*/
if (!list_empty(&child_ctx->pinned_groups) ||
!list_empty(&child_ctx->flexible_groups))
goto again;
mutex_unlock(&child_ctx->mutex);
put_ctx(child_ctx);
}
/*
* When a child task exits, feed back event values to parent events.
*/
void perf_event_exit_task(struct task_struct *child)
{
struct perf_event *event, *tmp;
int ctxn;
mutex_lock(&child->perf_event_mutex);
list_for_each_entry_safe(event, tmp, &child->perf_event_list,
owner_entry) {
list_del_init(&event->owner_entry);
/*
* Ensure the list deletion is visible before we clear
* the owner, closes a race against perf_release() where
* we need to serialize on the owner->perf_event_mutex.
*/
smp_wmb();
event->owner = NULL;
}
mutex_unlock(&child->perf_event_mutex);
for_each_task_context_nr(ctxn)
perf_event_exit_task_context(child, ctxn);
}
static void perf_free_event(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *parent = event->parent;
if (WARN_ON_ONCE(!parent))
return;
mutex_lock(&parent->child_mutex);
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
put_event(parent);
perf_group_detach(event);
list_del_event(event, ctx);
free_event(event);
}
/*
* free an unexposed, unused context as created by inheritance by
* perf_event_init_task below, used by fork() in case of fail.
*/
void perf_event_free_task(struct task_struct *task)
{
struct perf_event_context *ctx;
struct perf_event *event, *tmp;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
mutex_lock(&ctx->mutex);
again:
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
group_entry)
perf_free_event(event, ctx);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
group_entry)
perf_free_event(event, ctx);
if (!list_empty(&ctx->pinned_groups) ||
!list_empty(&ctx->flexible_groups))
goto again;
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
}
}
void perf_event_delayed_put(struct task_struct *task)
{
int ctxn;
for_each_task_context_nr(ctxn)
WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
/*
* inherit a event from parent task to child task:
*/
static struct perf_event *
inherit_event(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{
struct perf_event *child_event;
unsigned long flags;
/*
* Instead of creating recursive hierarchies of events,
* we link inherited events back to the original parent,
* which has a filp for sure, which we use as the reference
* count:
*/
if (parent_event->parent)
parent_event = parent_event->parent;
child_event = perf_event_alloc(&parent_event->attr,
parent_event->cpu,
child,
group_leader, parent_event,
NULL, NULL);
if (IS_ERR(child_event))
return child_event;
if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
free_event(child_event);
return NULL;
}
get_ctx(child_ctx);
/*
* Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
* so we won't race with perf_event_{en, dis}able_family.
*/
if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
child_event->state = PERF_EVENT_STATE_INACTIVE;
else
child_event->state = PERF_EVENT_STATE_OFF;
if (parent_event->attr.freq) {
u64 sample_period = parent_event->hw.sample_period;
struct hw_perf_event *hwc = &child_event->hw;
hwc->sample_period = sample_period;
hwc->last_period = sample_period;
local64_set(&hwc->period_left, sample_period);
}
child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
child_event->overflow_handler_context
= parent_event->overflow_handler_context;
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(child_event);
perf_event__id_header_size(child_event);
/*
* Link it up in the child's context:
*/
raw_spin_lock_irqsave(&child_ctx->lock, flags);
add_event_to_ctx(child_event, child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Link this into the parent event's child list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_add_tail(&child_event->child_list, &parent_event->child_list);
mutex_unlock(&parent_event->child_mutex);
return child_event;
}
static int inherit_group(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event_context *child_ctx)
{
struct perf_event *leader;
struct perf_event *sub;
struct perf_event *child_ctr;
leader = inherit_event(parent_event, parent, parent_ctx,
child, NULL, child_ctx);
if (IS_ERR(leader))
return PTR_ERR(leader);
list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
child_ctr = inherit_event(sub, parent, parent_ctx,
child, leader, child_ctx);
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
}
return 0;
}
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child, int ctxn,
int *inherited_all)
{
int ret;
struct perf_event_context *child_ctx;
if (!event->attr.inherit) {
*inherited_all = 0;
return 0;
}
child_ctx = child->perf_event_ctxp[ctxn];
if (!child_ctx) {
/*
* This is executed from the parent task context, so
* inherit events that have been marked for cloning.
* First allocate and initialize a context for the
* child.
*/
child_ctx = alloc_perf_context(event->pmu, child);
if (!child_ctx)
return -ENOMEM;
child->perf_event_ctxp[ctxn] = child_ctx;
}
ret = inherit_group(event, parent, parent_ctx,
child, child_ctx);
if (ret)
*inherited_all = 0;
return ret;
}
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
unsigned long flags;
int ret = 0;
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
/*
* No need to check if parent_ctx != NULL here; since we saw
* it non-NULL earlier, the only reason for it to become NULL
* is if we exit, and since we're currently in the middle of
* a fork we can't be exiting at the same time.
*/
/*
* Lock the parent list. No need to lock the child - not PID
* hashed yet and not running, so nobody can access it.
*/
mutex_lock(&parent_ctx->mutex);
/*
* We dont have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
/*
* We can't hold ctx->lock when iterating the ->flexible_group list due
* to allocations, but we need to prevent rotation because
* rotate_ctx() will change the list from interrupt context.
*/
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 1;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0;
child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
*
* Note that if the parent is a clone, the holding of
* parent_ctx->lock avoids it from being uncloned.
*/
cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen;
} else {
child_ctx->parent_ctx = parent_ctx;
child_ctx->parent_gen = parent_ctx->generation;
}
get_ctx(child_ctx->parent_ctx);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
put_ctx(parent_ctx);
return ret;
}
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_task(struct task_struct *child)
{
int ctxn, ret;
memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
mutex_init(&child->perf_event_mutex);
INIT_LIST_HEAD(&child->perf_event_list);
for_each_task_context_nr(ctxn) {
ret = perf_event_init_context(child, ctxn);
if (ret)
return ret;
}
return 0;
}
static void __init perf_event_init_all_cpus(void)
{
struct swevent_htable *swhash;
int cpu;
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
mutex_init(&swhash->hlist_mutex);
INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
WARN_ON(!hlist);
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
mutex_unlock(&swhash->hlist_mutex);
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
WARN_ON(!irqs_disabled());
list_del_init(&cpuctx->rotation_list);
}
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
struct perf_event *event, *tmp;
perf_pmu_rotate_stop(ctx->pmu);
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
__perf_remove_from_context(event);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
__perf_remove_from_context(event);
}
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_event_context *ctx;
struct pmu *pmu;
int idx;
idx = srcu_read_lock(&pmus_srcu);
list_for_each_entry_rcu(pmu, &pmus, entry) {
ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
mutex_lock(&ctx->mutex);
smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
}
static void perf_event_exit_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
int cpu;
for_each_online_cpu(cpu)
perf_event_exit_cpu(cpu);
return NOTIFY_OK;
}
/*
* Run the perf reboot notifier at the very last possible moment so that
* the generic watchdog code runs as long as possible.
*/
static struct notifier_block perf_reboot_notifier = {
.notifier_call = perf_reboot,
.priority = INT_MIN,
};
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
perf_event_exit_cpu(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
void __init perf_event_init(void)
{
int ret;
idr_init(&pmu_idr);
perf_event_init_all_cpus();
init_srcu_struct(&pmus_srcu);
perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
perf_pmu_register(&perf_cpu_clock, NULL, -1);
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
/* do not patch jump label more than once per second */
jump_label_rate_limit(&perf_sched_events, HZ);
/*
* Build time assertion that we keep the data_head at the intended
* location. IOW, validation we got the __reserved[] size right.
*/
BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
!= 1024);
}
static int __init perf_event_sysfs_init(void)
{
struct pmu *pmu;
int ret;
mutex_lock(&pmus_lock);
ret = bus_register(&pmu_bus);
if (ret)
goto unlock;
list_for_each_entry(pmu, &pmus, entry) {
if (!pmu->name || pmu->type < 0)
continue;
ret = pmu_dev_alloc(pmu);
WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
}
pmu_bus_running = 1;
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
}
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
{
struct perf_cgroup *jc;
jc = kzalloc(sizeof(*jc), GFP_KERNEL);
if (!jc)
return ERR_PTR(-ENOMEM);
jc->info = alloc_percpu(struct perf_cgroup_info);
if (!jc->info) {
kfree(jc);
return ERR_PTR(-ENOMEM);
}
return &jc->css;
}
static void perf_cgroup_css_free(struct cgroup *cont)
{
struct perf_cgroup *jc;
jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
struct perf_cgroup, css);
free_percpu(jc->info);
kfree(jc);
}
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
return 0;
}
static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
cgroup_taskset_for_each(task, cgrp, tset)
task_function_call(task, __perf_cgroup_move, task);
}
static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
* Ignore this case since the task hasn't ran yet, this avoids
* trying to poke a half freed task state from generic code.
*/
if (!(task->flags & PF_EXITING))
return;
task_function_call(task, __perf_cgroup_move, task);
}
struct cgroup_subsys perf_subsys = {
.name = "perf_event",
.subsys_id = perf_subsys_id,
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
.exit = perf_cgroup_exit,
.attach = perf_cgroup_attach,
/*
* perf_event cgroup doesn't handle nesting correctly.
* ctx->nr_cgroups adjustments should be propagated through the
* cgroup hierarchy. Fix it and remove the following.
*/
.broken_hierarchy = true,
};
#endif /* CONFIG_CGROUP_PERF */
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5640_0 |
crossvul-cpp_data_bad_3457_0 | /*
* arch/arm/kernel/sys_oabi-compat.c
*
* Compatibility wrappers for syscalls that are used from
* old ABI user space binaries with an EABI kernel.
*
* Author: Nicolas Pitre
* Created: Oct 7, 2005
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* The legacy ABI and the new ARM EABI have different rules making some
* syscalls incompatible especially with structure arguments.
* Most notably, Eabi says 64-bit members should be 64-bit aligned instead of
* simply word aligned. EABI also pads structures to the size of the largest
* member it contains instead of the invariant 32-bit.
*
* The following syscalls are affected:
*
* sys_stat64:
* sys_lstat64:
* sys_fstat64:
* sys_fstatat64:
*
* struct stat64 has different sizes and some members are shifted
* Compatibility wrappers are needed for them and provided below.
*
* sys_fcntl64:
*
* struct flock64 has different sizes and some members are shifted
* A compatibility wrapper is needed and provided below.
*
* sys_statfs64:
* sys_fstatfs64:
*
* struct statfs64 has extra padding with EABI growing its size from
* 84 to 88. This struct is now __attribute__((packed,aligned(4)))
* with a small assembly wrapper to force the sz argument to 84 if it is 88
* to avoid copying the extra padding over user space unexpecting it.
*
* sys_newuname:
*
* struct new_utsname has no padding with EABI. No problem there.
*
* sys_epoll_ctl:
* sys_epoll_wait:
*
* struct epoll_event has its second member shifted also affecting the
* structure size. Compatibility wrappers are needed and provided below.
*
* sys_ipc:
* sys_semop:
* sys_semtimedop:
*
* struct sembuf loses its padding with EABI. Since arrays of them are
* used they have to be copyed to remove the padding. Compatibility wrappers
* provided below.
*
* sys_bind:
* sys_connect:
* sys_sendmsg:
* sys_sendto:
* sys_socketcall:
*
* struct sockaddr_un loses its padding with EABI. Since the size of the
* structure is used as a validation test in unix_mkname(), we need to
* change the length argument to 110 whenever it is 112. Compatibility
* wrappers provided below.
*/
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/eventpoll.h>
#include <linux/sem.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
struct oldabi_stat64 {
unsigned long long st_dev;
unsigned int __pad1;
unsigned long __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned long st_uid;
unsigned long st_gid;
unsigned long long st_rdev;
unsigned int __pad2;
long long st_size;
unsigned long st_blksize;
unsigned long long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long long st_ino;
} __attribute__ ((packed,aligned(4)));
static long cp_oldabi_stat64(struct kstat *stat,
struct oldabi_stat64 __user *statbuf)
{
struct oldabi_stat64 tmp;
tmp.st_dev = huge_encode_dev(stat->dev);
tmp.__pad1 = 0;
tmp.__st_ino = stat->ino;
tmp.st_mode = stat->mode;
tmp.st_nlink = stat->nlink;
tmp.st_uid = stat->uid;
tmp.st_gid = stat->gid;
tmp.st_rdev = huge_encode_dev(stat->rdev);
tmp.st_size = stat->size;
tmp.st_blocks = stat->blocks;
tmp.__pad2 = 0;
tmp.st_blksize = stat->blksize;
tmp.st_atime = stat->atime.tv_sec;
tmp.st_atime_nsec = stat->atime.tv_nsec;
tmp.st_mtime = stat->mtime.tv_sec;
tmp.st_mtime_nsec = stat->mtime.tv_nsec;
tmp.st_ctime = stat->ctime.tv_sec;
tmp.st_ctime_nsec = stat->ctime.tv_nsec;
tmp.st_ino = stat->ino;
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
asmlinkage long sys_oabi_stat64(const char __user * filename,
struct oldabi_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
return error;
}
asmlinkage long sys_oabi_lstat64(const char __user * filename,
struct oldabi_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
return error;
}
asmlinkage long sys_oabi_fstat64(unsigned long fd,
struct oldabi_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
return error;
}
asmlinkage long sys_oabi_fstatat64(int dfd,
const char __user *filename,
struct oldabi_stat64 __user *statbuf,
int flag)
{
struct kstat stat;
int error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_oldabi_stat64(&stat, statbuf);
}
struct oabi_flock64 {
short l_type;
short l_whence;
loff_t l_start;
loff_t l_len;
pid_t l_pid;
} __attribute__ ((packed,aligned(4)));
asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
struct oabi_flock64 user;
struct flock64 kernel;
mm_segment_t fs = USER_DS; /* initialized to kill a warning */
unsigned long local_arg = arg;
int ret;
switch (cmd) {
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
sizeof(user)))
return -EFAULT;
kernel.l_type = user.l_type;
kernel.l_whence = user.l_whence;
kernel.l_start = user.l_start;
kernel.l_len = user.l_len;
kernel.l_pid = user.l_pid;
local_arg = (unsigned long)&kernel;
fs = get_fs();
set_fs(KERNEL_DS);
}
ret = sys_fcntl64(fd, cmd, local_arg);
switch (cmd) {
case F_GETLK64:
if (!ret) {
user.l_type = kernel.l_type;
user.l_whence = kernel.l_whence;
user.l_start = kernel.l_start;
user.l_len = kernel.l_len;
user.l_pid = kernel.l_pid;
if (copy_to_user((struct oabi_flock64 __user *)arg,
&user, sizeof(user)))
ret = -EFAULT;
}
case F_SETLK64:
case F_SETLKW64:
set_fs(fs);
}
return ret;
}
struct oabi_epoll_event {
__u32 events;
__u64 data;
} __attribute__ ((packed,aligned(4)));
asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
struct oabi_epoll_event __user *event)
{
struct oabi_epoll_event user;
struct epoll_event kernel;
mm_segment_t fs;
long ret;
if (op == EPOLL_CTL_DEL)
return sys_epoll_ctl(epfd, op, fd, NULL);
if (copy_from_user(&user, event, sizeof(user)))
return -EFAULT;
kernel.events = user.events;
kernel.data = user.data;
fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_epoll_ctl(epfd, op, fd, &kernel);
set_fs(fs);
return ret;
}
asmlinkage long sys_oabi_epoll_wait(int epfd,
struct oabi_epoll_event __user *events,
int maxevents, int timeout)
{
struct epoll_event *kbuf;
mm_segment_t fs;
long ret, err, i;
if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
return -EINVAL;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
set_fs(fs);
err = 0;
for (i = 0; i < ret; i++) {
__put_user_error(kbuf[i].events, &events->events, err);
__put_user_error(kbuf[i].data, &events->data, err);
events++;
}
kfree(kbuf);
return err ? -EFAULT : ret;
}
struct oabi_sembuf {
unsigned short sem_num;
short sem_op;
short sem_flg;
unsigned short __pad;
};
asmlinkage long sys_oabi_semtimedop(int semid,
struct oabi_sembuf __user *tsops,
unsigned nsops,
const struct timespec __user *timeout)
{
struct sembuf *sops;
struct timespec local_timeout;
long err;
int i;
if (nsops < 1)
return -EINVAL;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
tsops++;
}
if (timeout) {
/* copy this as well before changing domain protection */
err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
timeout = &local_timeout;
}
if (err) {
err = -EFAULT;
} else {
mm_segment_t fs = get_fs();
set_fs(KERNEL_DS);
err = sys_semtimedop(semid, sops, nsops, timeout);
set_fs(fs);
}
kfree(sops);
return err;
}
asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
unsigned nsops)
{
return sys_oabi_semtimedop(semid, tsops, nsops, NULL);
}
asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
void __user *ptr, long fifth)
{
switch (call & 0xffff) {
case SEMOP:
return sys_oabi_semtimedop(first,
(struct oabi_sembuf __user *)ptr,
second, NULL);
case SEMTIMEDOP:
return sys_oabi_semtimedop(first,
(struct oabi_sembuf __user *)ptr,
second,
(const struct timespec __user *)fifth);
default:
return sys_ipc(call, first, second, third, ptr, fifth);
}
}
asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen)
{
sa_family_t sa_family;
if (addrlen == 112 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
addrlen = 110;
return sys_bind(fd, addr, addrlen);
}
asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen)
{
sa_family_t sa_family;
if (addrlen == 112 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
addrlen = 110;
return sys_connect(fd, addr, addrlen);
}
asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
size_t len, unsigned flags,
struct sockaddr __user *addr,
int addrlen)
{
sa_family_t sa_family;
if (addrlen == 112 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
addrlen = 110;
return sys_sendto(fd, buff, len, flags, addr, addrlen);
}
asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
struct sockaddr __user *addr;
int msg_namelen;
sa_family_t sa_family;
if (msg &&
get_user(msg_namelen, &msg->msg_namelen) == 0 &&
msg_namelen == 112 &&
get_user(addr, &msg->msg_name) == 0 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
{
/*
* HACK ALERT: there is a limit to how much backward bending
* we should do for what is actually a transitional
* compatibility layer. This already has known flaws with
* a few ioctls that we don't intend to fix. Therefore
* consider this blatent hack as another one... and take care
* to run for cover. In most cases it will "just work fine".
* If it doesn't, well, tough.
*/
put_user(110, &msg->msg_namelen);
}
return sys_sendmsg(fd, msg, flags);
}
asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
{
unsigned long r = -EFAULT, a[6];
switch (call) {
case SYS_BIND:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]);
break;
case SYS_CONNECT:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]);
break;
case SYS_SENDTO:
if (copy_from_user(a, args, 6 * sizeof(long)) == 0)
r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3],
(struct sockaddr __user *)a[4], a[5]);
break;
case SYS_SENDMSG:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]);
break;
default:
r = sys_socketcall(call, args);
}
return r;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3457_0 |
crossvul-cpp_data_good_5728_0 | /*
* fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/pagevec.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/inet.h>
#include <linux/module.h>
#include <keys/user-type.h>
#include <net/ipv6.h>
#include <linux/parser.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fscache.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
extern mempool_t *cifs_req_poolp;
/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE (1 * HZ)
#define TLINK_IDLE_EXPIRE (600 * HZ)
enum {
/* Mount options that take no arguments */
Opt_user_xattr, Opt_nouser_xattr,
Opt_forceuid, Opt_noforceuid,
Opt_forcegid, Opt_noforcegid,
Opt_noblocksend, Opt_noautotune,
Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
Opt_nosfu, Opt_nodfs, Opt_posixpaths,
Opt_noposixpaths, Opt_nounix,
Opt_nocase,
Opt_brl, Opt_nobrl,
Opt_forcemandatorylock, Opt_setuids,
Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
Opt_nohard, Opt_nosoft,
Opt_nointr, Opt_intr,
Opt_nostrictsync, Opt_strictsync,
Opt_serverino, Opt_noserverino,
Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl,
Opt_acl, Opt_noacl, Opt_locallease,
Opt_sign, Opt_seal, Opt_noac,
Opt_fsc, Opt_mfsymlinks,
Opt_multiuser, Opt_sloppy,
/* Mount options which take numeric value */
Opt_backupuid, Opt_backupgid, Opt_uid,
Opt_cruid, Opt_gid, Opt_file_mode,
Opt_dirmode, Opt_port,
Opt_rsize, Opt_wsize, Opt_actimeo,
/* Mount options which take string value */
Opt_user, Opt_pass, Opt_ip,
Opt_domain, Opt_srcaddr, Opt_iocharset,
Opt_netbiosname, Opt_servern,
Opt_ver, Opt_vers, Opt_sec, Opt_cache,
/* Mount options to be ignored */
Opt_ignore,
/* Options which could be blank */
Opt_blank_pass,
Opt_blank_user,
Opt_blank_ip,
Opt_err
};
static const match_table_t cifs_mount_option_tokens = {
{ Opt_user_xattr, "user_xattr" },
{ Opt_nouser_xattr, "nouser_xattr" },
{ Opt_forceuid, "forceuid" },
{ Opt_noforceuid, "noforceuid" },
{ Opt_forcegid, "forcegid" },
{ Opt_noforcegid, "noforcegid" },
{ Opt_noblocksend, "noblocksend" },
{ Opt_noautotune, "noautotune" },
{ Opt_hard, "hard" },
{ Opt_soft, "soft" },
{ Opt_perm, "perm" },
{ Opt_noperm, "noperm" },
{ Opt_mapchars, "mapchars" },
{ Opt_nomapchars, "nomapchars" },
{ Opt_sfu, "sfu" },
{ Opt_nosfu, "nosfu" },
{ Opt_nodfs, "nodfs" },
{ Opt_posixpaths, "posixpaths" },
{ Opt_noposixpaths, "noposixpaths" },
{ Opt_nounix, "nounix" },
{ Opt_nounix, "nolinux" },
{ Opt_nocase, "nocase" },
{ Opt_nocase, "ignorecase" },
{ Opt_brl, "brl" },
{ Opt_nobrl, "nobrl" },
{ Opt_nobrl, "nolock" },
{ Opt_forcemandatorylock, "forcemandatorylock" },
{ Opt_forcemandatorylock, "forcemand" },
{ Opt_setuids, "setuids" },
{ Opt_nosetuids, "nosetuids" },
{ Opt_dynperm, "dynperm" },
{ Opt_nodynperm, "nodynperm" },
{ Opt_nohard, "nohard" },
{ Opt_nosoft, "nosoft" },
{ Opt_nointr, "nointr" },
{ Opt_intr, "intr" },
{ Opt_nostrictsync, "nostrictsync" },
{ Opt_strictsync, "strictsync" },
{ Opt_serverino, "serverino" },
{ Opt_noserverino, "noserverino" },
{ Opt_rwpidforward, "rwpidforward" },
{ Opt_cifsacl, "cifsacl" },
{ Opt_nocifsacl, "nocifsacl" },
{ Opt_acl, "acl" },
{ Opt_noacl, "noacl" },
{ Opt_locallease, "locallease" },
{ Opt_sign, "sign" },
{ Opt_seal, "seal" },
{ Opt_noac, "noac" },
{ Opt_fsc, "fsc" },
{ Opt_mfsymlinks, "mfsymlinks" },
{ Opt_multiuser, "multiuser" },
{ Opt_sloppy, "sloppy" },
{ Opt_backupuid, "backupuid=%s" },
{ Opt_backupgid, "backupgid=%s" },
{ Opt_uid, "uid=%s" },
{ Opt_cruid, "cruid=%s" },
{ Opt_gid, "gid=%s" },
{ Opt_file_mode, "file_mode=%s" },
{ Opt_dirmode, "dirmode=%s" },
{ Opt_dirmode, "dir_mode=%s" },
{ Opt_port, "port=%s" },
{ Opt_rsize, "rsize=%s" },
{ Opt_wsize, "wsize=%s" },
{ Opt_actimeo, "actimeo=%s" },
{ Opt_blank_user, "user=" },
{ Opt_blank_user, "username=" },
{ Opt_user, "user=%s" },
{ Opt_user, "username=%s" },
{ Opt_blank_pass, "pass=" },
{ Opt_blank_pass, "password=" },
{ Opt_pass, "pass=%s" },
{ Opt_pass, "password=%s" },
{ Opt_blank_ip, "ip=" },
{ Opt_blank_ip, "addr=" },
{ Opt_ip, "ip=%s" },
{ Opt_ip, "addr=%s" },
{ Opt_ignore, "unc=%s" },
{ Opt_ignore, "target=%s" },
{ Opt_ignore, "path=%s" },
{ Opt_domain, "dom=%s" },
{ Opt_domain, "domain=%s" },
{ Opt_domain, "workgroup=%s" },
{ Opt_srcaddr, "srcaddr=%s" },
{ Opt_ignore, "prefixpath=%s" },
{ Opt_iocharset, "iocharset=%s" },
{ Opt_netbiosname, "netbiosname=%s" },
{ Opt_servern, "servern=%s" },
{ Opt_ver, "ver=%s" },
{ Opt_vers, "vers=%s" },
{ Opt_sec, "sec=%s" },
{ Opt_cache, "cache=%s" },
{ Opt_ignore, "cred" },
{ Opt_ignore, "credentials" },
{ Opt_ignore, "cred=%s" },
{ Opt_ignore, "credentials=%s" },
{ Opt_ignore, "guest" },
{ Opt_ignore, "rw" },
{ Opt_ignore, "ro" },
{ Opt_ignore, "suid" },
{ Opt_ignore, "nosuid" },
{ Opt_ignore, "exec" },
{ Opt_ignore, "noexec" },
{ Opt_ignore, "nodev" },
{ Opt_ignore, "noauto" },
{ Opt_ignore, "dev" },
{ Opt_ignore, "mand" },
{ Opt_ignore, "nomand" },
{ Opt_ignore, "_netdev" },
{ Opt_err, NULL }
};
enum {
Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2,
Opt_sec_ntlmv2i, Opt_sec_lanman,
Opt_sec_none,
Opt_sec_err
};
static const match_table_t cifs_secflavor_tokens = {
{ Opt_sec_krb5, "krb5" },
{ Opt_sec_krb5i, "krb5i" },
{ Opt_sec_krb5p, "krb5p" },
{ Opt_sec_ntlmsspi, "ntlmsspi" },
{ Opt_sec_ntlmssp, "ntlmssp" },
{ Opt_ntlm, "ntlm" },
{ Opt_sec_ntlmi, "ntlmi" },
{ Opt_sec_ntlmv2, "nontlm" },
{ Opt_sec_ntlmv2, "ntlmv2" },
{ Opt_sec_ntlmv2i, "ntlmv2i" },
{ Opt_sec_lanman, "lanman" },
{ Opt_sec_none, "none" },
{ Opt_sec_err, NULL }
};
/* cache flavors */
enum {
Opt_cache_loose,
Opt_cache_strict,
Opt_cache_none,
Opt_cache_err
};
static const match_table_t cifs_cacheflavor_tokens = {
{ Opt_cache_loose, "loose" },
{ Opt_cache_strict, "strict" },
{ Opt_cache_none, "none" },
{ Opt_cache_err, NULL }
};
static const match_table_t cifs_smb_version_tokens = {
{ Smb_1, SMB1_VERSION_STRING },
{ Smb_20, SMB20_VERSION_STRING},
{ Smb_21, SMB21_VERSION_STRING },
{ Smb_30, SMB30_VERSION_STRING },
};
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);
static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname);
/*
* cifs tcp session reconnection
*
* mark tcp session as reconnecting so temporarily locked
* mark all smb sessions as reconnecting for tcp session
* reconnect tcp session
* wake up waiters on reconnection? - (not needed currently)
*/
int
cifs_reconnect(struct TCP_Server_Info *server)
{
int rc = 0;
struct list_head *tmp, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct mid_q_entry *mid_entry;
struct list_head retry_list;
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus == CifsExiting) {
/* the demux thread will exit normally
next time through the loop */
spin_unlock(&GlobalMid_Lock);
return rc;
} else
server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
#ifdef CONFIG_CIFS_SMB2
server->max_read = 0;
#endif
cifs_dbg(FYI, "Reconnecting tcp session\n");
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n",
__func__);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
ses->need_reconnect = true;
ses->ipc_tid = 0;
list_for_each(tmp2, &ses->tcon_list) {
tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
tcon->need_reconnect = true;
}
}
spin_unlock(&cifs_tcp_ses_lock);
/* do not want to be sending data on a socket we are freeing */
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
server->ssocket->state, server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
server->ssocket->state, server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
server->sequence_number = 0;
server->session_estab = false;
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
server->lstrp = jiffies;
mutex_unlock(&server->srv_mutex);
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
mid_entry->mid_state = MID_RETRY_NEEDED;
list_move(&mid_entry->qhead, &retry_list);
}
spin_unlock(&GlobalMid_Lock);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_safe(tmp, tmp2, &retry_list) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
do {
try_to_freeze();
/* we should try only the port we connected to before */
rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&GlobalMid_Lock);
}
} while (server->tcpStatus == CifsNeedReconnect);
return rc;
}
static void
cifs_echo_request(struct work_struct *work)
{
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
/*
* We cannot send an echo if it is disabled or until the
* NEGOTIATE_PROTOCOL request is done, which is indicated by
* server->ops->need_neg() == true. Also, no need to ping if
* we got a response recently.
*/
if (!server->ops->need_neg || server->ops->need_neg(server) ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
if (rc)
cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
server->hostname);
requeue_echo:
queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL);
}
static bool
allocate_buffers(struct TCP_Server_Info *server)
{
if (!server->bigbuf) {
server->bigbuf = (char *)cifs_buf_get();
if (!server->bigbuf) {
cifs_dbg(VFS, "No memory for large SMB response\n");
msleep(3000);
/* retry will check if exiting */
return false;
}
} else if (server->large_buf) {
/* we are reusing a dirty large buf, clear its start */
memset(server->bigbuf, 0, HEADER_SIZE(server));
}
if (!server->smallbuf) {
server->smallbuf = (char *)cifs_small_buf_get();
if (!server->smallbuf) {
cifs_dbg(VFS, "No memory for SMB response\n");
msleep(1000);
/* retry will check if exiting */
return false;
}
/* beginning of smb buffer is cleared in our buf_get */
} else {
/* if existing small buf clear beginning */
memset(server->smallbuf, 0, HEADER_SIZE(server));
}
return true;
}
static bool
server_unresponsive(struct TCP_Server_Info *server)
{
/*
* We need to wait 2 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
* 2s client gets a response
* 30s echo workqueue job pops, and decides we got a response recently
* and don't need to send another
* ...
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
if (server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) {
cifs_dbg(VFS, "Server %s has not responded in %d seconds. Reconnecting...\n",
server->hostname, (2 * SMB_ECHO_INTERVAL) / HZ);
cifs_reconnect(server);
wake_up(&server->response_q);
return true;
}
return false;
}
/*
* kvec_array_init - clone a kvec array, and advance into it
* @new: pointer to memory for cloned array
* @iov: pointer to original array
* @nr_segs: number of members in original array
* @bytes: number of bytes to advance into the cloned array
*
* This function will copy the array provided in iov to a section of memory
* and advance the specified number of bytes into the new array. It returns
* the number of segments in the new array. "new" must be at least as big as
* the original iov array.
*/
static unsigned int
kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
size_t bytes)
{
size_t base = 0;
while (bytes || !iov->iov_len) {
int copy = min(bytes, iov->iov_len);
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
iov++;
nr_segs--;
base = 0;
}
}
memcpy(new, iov, sizeof(*iov) * nr_segs);
new->iov_base += base;
new->iov_len -= base;
return nr_segs;
}
static struct kvec *
get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
{
struct kvec *new_iov;
if (server->iov && nr_segs <= server->nr_iov)
return server->iov;
/* not big enough -- allocate a new one and release the old */
new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
if (new_iov) {
kfree(server->iov);
server->iov = new_iov;
server->nr_iov = nr_segs;
}
return new_iov;
}
int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
unsigned int nr_segs, unsigned int to_read)
{
int length = 0;
int total_read;
unsigned int segs;
struct msghdr smb_msg;
struct kvec *iov;
iov = get_server_iovec(server, nr_segs);
if (!iov)
return -ENOMEM;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
try_to_freeze();
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
}
segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
length = kernel_recvmsg(server->ssocket, &smb_msg,
iov, segs, to_read, 0);
if (server->tcpStatus == CifsExiting) {
total_read = -ESHUTDOWN;
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server);
total_read = -EAGAIN;
break;
} else if (length == -ERESTARTSYS ||
length == -EAGAIN ||
length == -EINTR) {
/*
* Minimum sleep to prevent looping, allowing socket
* to clear and app threads to set tcpStatus
* CifsNeedReconnect if server hung.
*/
usleep_range(1000, 2000);
length = 0;
continue;
} else if (length <= 0) {
cifs_dbg(FYI, "Received no data or error: expecting %d\n"
"got %d", to_read, length);
cifs_reconnect(server);
total_read = -EAGAIN;
break;
}
}
return total_read;
}
int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read)
{
struct kvec iov;
iov.iov_base = buf;
iov.iov_len = to_read;
return cifs_readv_from_socket(server, &iov, 1, to_read);
}
static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
/*
* The first byte big endian of the length field,
* is actually not part of the length but the type
* with the most common, zero, as regular data.
*/
switch (type) {
case RFC1002_SESSION_MESSAGE:
/* Regular SMB response */
return true;
case RFC1002_SESSION_KEEP_ALIVE:
cifs_dbg(FYI, "RFC 1002 session keep alive\n");
break;
case RFC1002_POSITIVE_SESSION_RESPONSE:
cifs_dbg(FYI, "RFC 1002 positive session response\n");
break;
case RFC1002_NEGATIVE_SESSION_RESPONSE:
/*
* We get this from Windows 98 instead of an error on
* SMB negprot response.
*/
cifs_dbg(FYI, "RFC 1002 negative session response\n");
/* give server a second to clean up */
msleep(1000);
/*
* Always try 445 first on reconnect since we get NACK
* on some if we ever connected to port 139 (the NACK
* is since we do not begin with RFC1001 session
* initialize frame).
*/
cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
cifs_reconnect(server);
wake_up(&server->response_q);
break;
default:
cifs_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
cifs_reconnect(server);
}
return false;
}
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
spin_lock(&GlobalMid_Lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
mid->mid_state = MID_RESPONSE_MALFORMED;
list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
char *buf, int malformed)
{
if (server->ops->check_trans2 &&
server->ops->check_trans2(mid, server, buf, malformed))
return;
mid->resp_buf = buf;
mid->large_buf = server->large_buf;
/* Was previous buf put in mpx struct for multi-rsp? */
if (!mid->multiRsp) {
/* smb buffer will be freed by user thread */
if (server->large_buf)
server->bigbuf = NULL;
else
server->smallbuf = NULL;
}
dequeue_mid(mid, malformed);
}
static void clean_demultiplex_info(struct TCP_Server_Info *server)
{
int length;
/* take it off the list, if it's not already */
spin_lock(&cifs_tcp_ses_lock);
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
wake_up_all(&server->response_q);
/* check if we have blocked requests that need to free */
spin_lock(&server->req_lock);
if (server->credits <= 0)
server->credits = 1;
spin_unlock(&server->req_lock);
/*
* Although there should not be any requests blocked on this queue it
* can not hurt to be paranoid and try to wake up requests that may
* haven been blocked when more than 50 at time were on the wire to the
* same server - they now will see the session is in exit state and get
* out of SendReceive.
*/
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
}
if (!list_empty(&server->pending_mid_q)) {
struct list_head dispose_list;
struct mid_q_entry *mid_entry;
struct list_head *tmp, *tmp2;
INIT_LIST_HEAD(&dispose_list);
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
}
spin_unlock(&GlobalMid_Lock);
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
}
if (!list_empty(&server->pending_mid_q)) {
/*
* mpx threads have not exited yet give them at least the smb
* send timeout time for long ops.
*
* Due to delays on oplock break requests, we need to wait at
* least 45 seconds before giving up on a request getting a
* response and going ahead and killing cifsd.
*/
cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
msleep(46000);
/*
* If threads still have not exited they are probably never
* coming home not much else we can do but free the memory.
*/
}
kfree(server->hostname);
kfree(server->iov);
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
if (length > 0)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
GFP_KERNEL);
}
static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length;
char *buf = server->smallbuf;
unsigned int pdu_length = get_rfc1002_length(buf);
/* make sure this will fit in a large buffer */
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) {
cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
return -EAGAIN;
}
/* switch to large buffer if too big for a small one */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
server->large_buf = true;
memcpy(server->bigbuf, buf, server->total_read);
buf = server->bigbuf;
}
/* now read the rest */
length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
pdu_length - HEADER_SIZE(server) + 1 + 4);
if (length < 0)
return length;
server->total_read += length;
dump_smb(buf, server->total_read);
/*
* We know that we received enough to get to the MID as we
* checked the pdu_length earlier. Now check to see
* if the rest of the header is OK. We borrow the length
* var for the rest of the loop to avoid a new stack var.
*
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
length = server->ops->check_message(buf, server->total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
if (server->ops->is_status_pending &&
server->ops->is_status_pending(buf, server, length))
return -1;
if (!mid)
return length;
handle_mid(mid, server, buf, length);
return 0;
}
static int
cifs_demultiplex_thread(void *p)
{
int length;
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
char *buf = NULL;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
current->flags |= PF_MEMALLOC;
cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
length = atomic_inc_return(&tcpSesAllocCount);
if (length > 1)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
GFP_KERNEL);
set_freezable();
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
if (!allocate_buffers(server))
continue;
server->large_buf = false;
buf = server->smallbuf;
pdu_length = 4; /* enough to get RFC1001 header */
length = cifs_read_from_socket(server, buf, pdu_length);
if (length < 0)
continue;
server->total_read = length;
/*
* The right amount was read from socket - 4 bytes,
* so we can now interpret the length field.
*/
pdu_length = get_rfc1002_length(buf);
cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
if (!is_smb_response(server, buf[0]))
continue;
/* make sure we have enough to get to the MID */
if (pdu_length < HEADER_SIZE(server) - 1 - 4) {
cifs_dbg(VFS, "SMB response too short (%u bytes)\n",
pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
continue;
}
/* read down to the MID */
length = cifs_read_from_socket(server, buf + 4,
HEADER_SIZE(server) - 1 - 4);
if (length < 0)
continue;
server->total_read += length;
mid_entry = server->ops->find_mid(server, buf);
if (!mid_entry || !mid_entry->receive)
length = standard_receive3(server, mid_entry);
else
length = mid_entry->receive(server, mid_entry);
if (length < 0)
continue;
if (server->large_buf)
buf = server->bigbuf;
server->lstrp = jiffies;
if (mid_entry != NULL) {
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
} else if (!server->ops->is_oplock_break ||
!server->ops->is_oplock_break(buf, server)) {
cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", buf,
HEADER_SIZE(server));
#ifdef CONFIG_CIFS_DEBUG2
if (server->ops->dump_detail)
server->ops->dump_detail(buf);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
}
} /* end while !EXITING */
/* buffer usually freed in free_mid - need to free it here on exit */
cifs_buf_release(server->bigbuf);
if (server->smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(server->smallbuf);
task_to_wake = xchg(&server->tsk, NULL);
clean_demultiplex_info(server);
/* if server->tsk was NULL then wait for a signal before exiting */
if (!task_to_wake) {
set_current_state(TASK_INTERRUPTIBLE);
while (!signal_pending(current)) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
}
module_put_and_exit(0);
}
/* extract the host portion of the UNC string */
static char *
extract_hostname(const char *unc)
{
const char *src;
char *dst, *delim;
unsigned int len;
/* skip double chars at beginning of string */
/* BB: check validity of these bytes? */
src = unc + 2;
/* delimiter between hostname and sharename is always '\\' now */
delim = strchr(src, '\\');
if (!delim)
return ERR_PTR(-EINVAL);
len = delim - src;
dst = kmalloc((len + 1), GFP_KERNEL);
if (dst == NULL)
return ERR_PTR(-ENOMEM);
memcpy(dst, src, len);
dst[len] = '\0';
return dst;
}
static int get_option_ul(substring_t args[], unsigned long *option)
{
int rc;
char *string;
string = match_strdup(args);
if (string == NULL)
return -ENOMEM;
rc = kstrtoul(string, 0, option);
kfree(string);
return rc;
}
static int get_option_uid(substring_t args[], kuid_t *result)
{
unsigned long value;
kuid_t uid;
int rc;
rc = get_option_ul(args, &value);
if (rc)
return rc;
uid = make_kuid(current_user_ns(), value);
if (!uid_valid(uid))
return -EINVAL;
*result = uid;
return 0;
}
static int get_option_gid(substring_t args[], kgid_t *result)
{
unsigned long value;
kgid_t gid;
int rc;
rc = get_option_ul(args, &value);
if (rc)
return rc;
gid = make_kgid(current_user_ns(), value);
if (!gid_valid(gid))
return -EINVAL;
*result = gid;
return 0;
}
static int cifs_parse_security_flavors(char *value,
struct smb_vol *vol)
{
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_secflavor_tokens, args)) {
case Opt_sec_krb5:
vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_SIGN;
break;
case Opt_sec_krb5i:
vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
break;
case Opt_sec_krb5p:
/* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */
cifs_dbg(VFS, "Krb5 cifs privacy not supported\n");
break;
case Opt_sec_ntlmssp:
vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
break;
case Opt_sec_ntlmsspi:
vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN;
break;
case Opt_ntlm:
/* ntlm is default so can be turned off too */
vol->secFlg |= CIFSSEC_MAY_NTLM;
break;
case Opt_sec_ntlmi:
vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
break;
case Opt_sec_ntlmv2:
vol->secFlg |= CIFSSEC_MAY_NTLMV2;
break;
case Opt_sec_ntlmv2i:
vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN;
break;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
case Opt_sec_lanman:
vol->secFlg |= CIFSSEC_MAY_LANMAN;
break;
#endif
case Opt_sec_none:
vol->nullauth = 1;
vol->secFlg |= CIFSSEC_MAY_NTLM;
break;
default:
cifs_dbg(VFS, "bad security option: %s\n", value);
return 1;
}
return 0;
}
static int
cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
{
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_cacheflavor_tokens, args)) {
case Opt_cache_loose:
vol->direct_io = false;
vol->strict_io = false;
break;
case Opt_cache_strict:
vol->direct_io = false;
vol->strict_io = true;
break;
case Opt_cache_none:
vol->direct_io = true;
vol->strict_io = false;
break;
default:
cifs_dbg(VFS, "bad cache= option: %s\n", value);
return 1;
}
return 0;
}
static int
cifs_parse_smb_version(char *value, struct smb_vol *vol)
{
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_smb_version_tokens, args)) {
case Smb_1:
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
break;
#ifdef CONFIG_CIFS_SMB2
case Smb_20:
vol->ops = &smb21_operations; /* currently identical with 2.1 */
vol->vals = &smb20_values;
break;
case Smb_21:
vol->ops = &smb21_operations;
vol->vals = &smb21_values;
break;
case Smb_30:
vol->ops = &smb30_operations;
vol->vals = &smb30_values;
break;
#endif
default:
cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
return 1;
}
return 0;
}
/*
* Parse a devname into substrings and populate the vol->UNC and vol->prepath
* fields with the result. Returns 0 on success and an error otherwise.
*/
static int
cifs_parse_devname(const char *devname, struct smb_vol *vol)
{
char *pos;
const char *delims = "/\\";
size_t len;
/* make sure we have a valid UNC double delimiter prefix */
len = strspn(devname, delims);
if (len != 2)
return -EINVAL;
/* find delimiter between host and sharename */
pos = strpbrk(devname + 2, delims);
if (!pos)
return -EINVAL;
/* skip past delimiter */
++pos;
/* now go until next delimiter or end of string */
len = strcspn(pos, delims);
/* move "pos" up to delimiter or NULL */
pos += len;
vol->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
if (!vol->UNC)
return -ENOMEM;
convert_delimiter(vol->UNC, '\\');
/* If pos is NULL, or is a bogus trailing delimiter then no prepath */
if (!*pos++ || !*pos)
return 0;
vol->prepath = kstrdup(pos, GFP_KERNEL);
if (!vol->prepath)
return -ENOMEM;
return 0;
}
static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
struct smb_vol *vol)
{
char *data, *end;
char *mountdata_copy = NULL, *options;
unsigned int temp_len, i, j;
char separator[2];
short int override_uid = -1;
short int override_gid = -1;
bool uid_specified = false;
bool gid_specified = false;
bool sloppy = false;
char *invalid = NULL;
char *nodename = utsname()->nodename;
char *string = NULL;
char *tmp_end, *value;
char delim;
bool got_ip = false;
unsigned short port = 0;
struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr;
separator[0] = ',';
separator[1] = 0;
delim = separator[0];
/* ensure we always start with zeroed-out smb_vol */
memset(vol, 0, sizeof(*vol));
/*
* does not have to be perfect mapping since field is
* informational, only used for servers that do not support
* port 445 and it can be overridden at mount time
*/
memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
vol->source_rfc1001_name[i] = toupper(nodename[i]);
vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
/* null target name indicates to use *SMBSERVR default called name
if we end up sending RFC1001 session initialize */
vol->target_rfc1001_name[0] = 0;
vol->cred_uid = current_uid();
vol->linux_uid = current_uid();
vol->linux_gid = current_gid();
/* default to only allowing write access to owner of the mount */
vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
/* default is always to request posix paths. */
vol->posix_paths = 1;
/* default to using server inode numbers where available */
vol->server_ino = 1;
/* default is to use strict cifs caching semantics */
vol->strict_io = true;
vol->actimeo = CIFS_DEF_ACTIMEO;
/* FIXME: add autonegotiation -- for now, SMB1 is default */
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
if (!mountdata)
goto cifs_parse_mount_err;
mountdata_copy = kstrndup(mountdata, PAGE_SIZE, GFP_KERNEL);
if (!mountdata_copy)
goto cifs_parse_mount_err;
options = mountdata_copy;
end = options + strlen(options);
if (strncmp(options, "sep=", 4) == 0) {
if (options[4] != 0) {
separator[0] = options[4];
options += 5;
} else {
cifs_dbg(FYI, "Null separator not allowed\n");
}
}
vol->backupuid_specified = false; /* no backup intent for a user */
vol->backupgid_specified = false; /* no backup intent for a group */
switch (cifs_parse_devname(devname, vol)) {
case 0:
break;
case -ENOMEM:
cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
goto cifs_parse_mount_err;
case -EINVAL:
cifs_dbg(VFS, "Malformed UNC in devname.\n");
goto cifs_parse_mount_err;
default:
cifs_dbg(VFS, "Unknown error parsing devname.\n");
goto cifs_parse_mount_err;
}
while ((data = strsep(&options, separator)) != NULL) {
substring_t args[MAX_OPT_ARGS];
unsigned long option;
int token;
if (!*data)
continue;
token = match_token(data, cifs_mount_option_tokens, args);
switch (token) {
/* Ingnore the following */
case Opt_ignore:
break;
/* Boolean values */
case Opt_user_xattr:
vol->no_xattr = 0;
break;
case Opt_nouser_xattr:
vol->no_xattr = 1;
break;
case Opt_forceuid:
override_uid = 1;
break;
case Opt_noforceuid:
override_uid = 0;
break;
case Opt_forcegid:
override_gid = 1;
break;
case Opt_noforcegid:
override_gid = 0;
break;
case Opt_noblocksend:
vol->noblocksnd = 1;
break;
case Opt_noautotune:
vol->noautotune = 1;
break;
case Opt_hard:
vol->retry = 1;
break;
case Opt_soft:
vol->retry = 0;
break;
case Opt_perm:
vol->noperm = 0;
break;
case Opt_noperm:
vol->noperm = 1;
break;
case Opt_mapchars:
vol->remap = 1;
break;
case Opt_nomapchars:
vol->remap = 0;
break;
case Opt_sfu:
vol->sfu_emul = 1;
break;
case Opt_nosfu:
vol->sfu_emul = 0;
break;
case Opt_nodfs:
vol->nodfs = 1;
break;
case Opt_posixpaths:
vol->posix_paths = 1;
break;
case Opt_noposixpaths:
vol->posix_paths = 0;
break;
case Opt_nounix:
vol->no_linux_ext = 1;
break;
case Opt_nocase:
vol->nocase = 1;
break;
case Opt_brl:
vol->nobrl = 0;
break;
case Opt_nobrl:
vol->nobrl = 1;
/*
* turn off mandatory locking in mode
* if remote locking is turned off since the
* local vfs will do advisory
*/
if (vol->file_mode ==
(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
vol->file_mode = S_IALLUGO;
break;
case Opt_forcemandatorylock:
vol->mand_lock = 1;
break;
case Opt_setuids:
vol->setuids = 1;
break;
case Opt_nosetuids:
vol->setuids = 0;
break;
case Opt_dynperm:
vol->dynperm = true;
break;
case Opt_nodynperm:
vol->dynperm = false;
break;
case Opt_nohard:
vol->retry = 0;
break;
case Opt_nosoft:
vol->retry = 1;
break;
case Opt_nointr:
vol->intr = 0;
break;
case Opt_intr:
vol->intr = 1;
break;
case Opt_nostrictsync:
vol->nostrictsync = 1;
break;
case Opt_strictsync:
vol->nostrictsync = 0;
break;
case Opt_serverino:
vol->server_ino = 1;
break;
case Opt_noserverino:
vol->server_ino = 0;
break;
case Opt_rwpidforward:
vol->rwpidforward = 1;
break;
case Opt_cifsacl:
vol->cifs_acl = 1;
break;
case Opt_nocifsacl:
vol->cifs_acl = 0;
break;
case Opt_acl:
vol->no_psx_acl = 0;
break;
case Opt_noacl:
vol->no_psx_acl = 1;
break;
case Opt_locallease:
vol->local_lease = 1;
break;
case Opt_sign:
vol->secFlg |= CIFSSEC_MUST_SIGN;
break;
case Opt_seal:
/* we do not do the following in secFlags because seal
* is a per tree connection (mount) not a per socket
* or per-smb connection option in the protocol
* vol->secFlg |= CIFSSEC_MUST_SEAL;
*/
vol->seal = 1;
break;
case Opt_noac:
printk(KERN_WARNING "CIFS: Mount option noac not "
"supported. Instead set "
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
break;
case Opt_fsc:
#ifndef CONFIG_CIFS_FSCACHE
cifs_dbg(VFS, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n");
goto cifs_parse_mount_err;
#endif
vol->fsc = true;
break;
case Opt_mfsymlinks:
vol->mfsymlinks = true;
break;
case Opt_multiuser:
vol->multiuser = true;
break;
case Opt_sloppy:
sloppy = true;
break;
/* Numeric Values */
case Opt_backupuid:
if (get_option_uid(args, &vol->backupuid)) {
cifs_dbg(VFS, "%s: Invalid backupuid value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->backupuid_specified = true;
break;
case Opt_backupgid:
if (get_option_gid(args, &vol->backupgid)) {
cifs_dbg(VFS, "%s: Invalid backupgid value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->backupgid_specified = true;
break;
case Opt_uid:
if (get_option_uid(args, &vol->linux_uid)) {
cifs_dbg(VFS, "%s: Invalid uid value\n",
__func__);
goto cifs_parse_mount_err;
}
uid_specified = true;
break;
case Opt_cruid:
if (get_option_uid(args, &vol->cred_uid)) {
cifs_dbg(VFS, "%s: Invalid cruid value\n",
__func__);
goto cifs_parse_mount_err;
}
break;
case Opt_gid:
if (get_option_gid(args, &vol->linux_gid)) {
cifs_dbg(VFS, "%s: Invalid gid value\n",
__func__);
goto cifs_parse_mount_err;
}
gid_specified = true;
break;
case Opt_file_mode:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid file_mode value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->file_mode = option;
break;
case Opt_dirmode:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid dir_mode value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->dir_mode = option;
break;
case Opt_port:
if (get_option_ul(args, &option) ||
option > USHRT_MAX) {
cifs_dbg(VFS, "%s: Invalid port value\n",
__func__);
goto cifs_parse_mount_err;
}
port = (unsigned short)option;
break;
case Opt_rsize:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid rsize value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->rsize = option;
break;
case Opt_wsize:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid wsize value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->wsize = option;
break;
case Opt_actimeo:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid actimeo value\n",
__func__);
goto cifs_parse_mount_err;
}
vol->actimeo = HZ * option;
if (vol->actimeo > CIFS_MAX_ACTIMEO) {
cifs_dbg(VFS, "attribute cache timeout too large\n");
goto cifs_parse_mount_err;
}
break;
/* String Arguments */
case Opt_blank_user:
/* null user, ie. anonymous authentication */
vol->nullauth = 1;
vol->username = NULL;
break;
case Opt_user:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, MAX_USERNAME_SIZE) >
MAX_USERNAME_SIZE) {
printk(KERN_WARNING "CIFS: username too long\n");
goto cifs_parse_mount_err;
}
vol->username = kstrdup(string, GFP_KERNEL);
if (!vol->username)
goto cifs_parse_mount_err;
break;
case Opt_blank_pass:
/* passwords have to be handled differently
* to allow the character used for deliminator
* to be passed within them
*/
/*
* Check if this is a case where the password
* starts with a delimiter
*/
tmp_end = strchr(data, '=');
tmp_end++;
if (!(tmp_end < end && tmp_end[1] == delim)) {
/* No it is not. Set the password to NULL */
vol->password = NULL;
break;
}
/* Yes it is. Drop down to Opt_pass below.*/
case Opt_pass:
/* Obtain the value string */
value = strchr(data, '=');
value++;
/* Set tmp_end to end of the string */
tmp_end = (char *) value + strlen(value);
/* Check if following character is the deliminator
* If yes, we have encountered a double deliminator
* reset the NULL character to the deliminator
*/
if (tmp_end < end && tmp_end[1] == delim) {
tmp_end[0] = delim;
/* Keep iterating until we get to a single
* deliminator OR the end
*/
while ((tmp_end = strchr(tmp_end, delim))
!= NULL && (tmp_end[1] == delim)) {
tmp_end = (char *) &tmp_end[2];
}
/* Reset var options to point to next element */
if (tmp_end) {
tmp_end[0] = '\0';
options = (char *) &tmp_end[1];
} else
/* Reached the end of the mount option
* string */
options = end;
}
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
if (vol->password == NULL) {
printk(KERN_WARNING "CIFS: no memory "
"for password\n");
goto cifs_parse_mount_err;
}
for (i = 0, j = 0; i < temp_len; i++, j++) {
vol->password[j] = value[i];
if ((value[i] == delim) &&
value[i+1] == delim)
/* skip the second deliminator */
i++;
}
vol->password[j] = '\0';
break;
case Opt_blank_ip:
/* FIXME: should this be an error instead? */
got_ip = false;
break;
case Opt_ip:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (!cifs_convert_address(dstaddr, string,
strlen(string))) {
printk(KERN_ERR "CIFS: bad ip= option (%s).\n",
string);
goto cifs_parse_mount_err;
}
got_ip = true;
break;
case Opt_domain:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, 256) == 256) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
}
vol->domainname = kstrdup(string, GFP_KERNEL);
if (!vol->domainname) {
printk(KERN_WARNING "CIFS: no memory "
"for domainname\n");
goto cifs_parse_mount_err;
}
cifs_dbg(FYI, "Domain name set\n");
break;
case Opt_srcaddr:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (!cifs_convert_address(
(struct sockaddr *)&vol->srcaddr,
string, strlen(string))) {
printk(KERN_WARNING "CIFS: Could not parse"
" srcaddr: %s\n", string);
goto cifs_parse_mount_err;
}
break;
case Opt_iocharset:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, 1024) >= 65) {
printk(KERN_WARNING "CIFS: iocharset name "
"too long.\n");
goto cifs_parse_mount_err;
}
if (strnicmp(string, "default", 7) != 0) {
vol->iocharset = kstrdup(string,
GFP_KERNEL);
if (!vol->iocharset) {
printk(KERN_WARNING "CIFS: no memory"
"for charset\n");
goto cifs_parse_mount_err;
}
}
/* if iocharset not set then load_nls_default
* is used by caller
*/
cifs_dbg(FYI, "iocharset set to %s\n", string);
break;
case Opt_netbiosname:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
memset(vol->source_rfc1001_name, 0x20,
RFC1001_NAME_LEN);
/*
* FIXME: are there cases in which a comma can
* be valid in workstation netbios name (and
* need special handling)?
*/
for (i = 0; i < RFC1001_NAME_LEN; i++) {
/* don't ucase netbiosname for user */
if (string[i] == 0)
break;
vol->source_rfc1001_name[i] = string[i];
}
/* The string has 16th byte zero still from
* set at top of the function
*/
if (i == RFC1001_NAME_LEN && string[i] != 0)
printk(KERN_WARNING "CIFS: netbiosname"
" longer than 15 truncated.\n");
break;
case Opt_servern:
/* servernetbiosname specified override *SMBSERVER */
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
/* last byte, type, is 0x20 for servr type */
memset(vol->target_rfc1001_name, 0x20,
RFC1001_NAME_LEN_WITH_NULL);
/* BB are there cases in which a comma can be
valid in this workstation netbios name
(and need special handling)? */
/* user or mount helper must uppercase the
netbios name */
for (i = 0; i < 15; i++) {
if (string[i] == 0)
break;
vol->target_rfc1001_name[i] = string[i];
}
/* The string has 16th byte zero still from
set at top of the function */
if (i == RFC1001_NAME_LEN && string[i] != 0)
printk(KERN_WARNING "CIFS: server net"
"biosname longer than 15 truncated.\n");
break;
case Opt_ver:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnicmp(string, "1", 1) == 0) {
/* This is the default */
break;
}
/* For all other value, error */
printk(KERN_WARNING "CIFS: Invalid version"
" specified\n");
goto cifs_parse_mount_err;
case Opt_vers:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (cifs_parse_smb_version(string, vol) != 0)
goto cifs_parse_mount_err;
break;
case Opt_sec:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (cifs_parse_security_flavors(string, vol) != 0)
goto cifs_parse_mount_err;
break;
case Opt_cache:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (cifs_parse_cache_flavor(string, vol) != 0)
goto cifs_parse_mount_err;
break;
default:
/*
* An option we don't recognize. Save it off for later
* if we haven't already found one
*/
if (!invalid)
invalid = data;
break;
}
/* Free up any allocated string */
kfree(string);
string = NULL;
}
if (!sloppy && invalid) {
printk(KERN_ERR "CIFS: Unknown mount option \"%s\"\n", invalid);
goto cifs_parse_mount_err;
}
#ifndef CONFIG_KEYS
/* Muliuser mounts require CONFIG_KEYS support */
if (vol->multiuser) {
cifs_dbg(VFS, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n");
goto cifs_parse_mount_err;
}
#endif
if (!vol->UNC) {
cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n");
goto cifs_parse_mount_err;
}
/* make sure UNC has a share name */
if (!strchr(vol->UNC + 3, '\\')) {
cifs_dbg(VFS, "Malformed UNC. Unable to find share name.\n");
goto cifs_parse_mount_err;
}
if (!got_ip) {
/* No ip= option specified? Try to get it from UNC */
if (!cifs_convert_address(dstaddr, &vol->UNC[2],
strlen(&vol->UNC[2]))) {
printk(KERN_ERR "Unable to determine destination "
"address.\n");
goto cifs_parse_mount_err;
}
}
/* set the port that we got earlier */
cifs_set_port(dstaddr, port);
if (uid_specified)
vol->override_uid = override_uid;
else if (override_uid == 1)
printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
"specified with no uid= option.\n");
if (gid_specified)
vol->override_gid = override_gid;
else if (override_gid == 1)
printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
"specified with no gid= option.\n");
kfree(mountdata_copy);
return 0;
out_nomem:
printk(KERN_WARNING "Could not allocate temporary buffer\n");
cifs_parse_mount_err:
kfree(string);
kfree(mountdata_copy);
return 1;
}
/** Returns true if srcaddr isn't specified and rhs isn't
* specified, or if srcaddr is specified and
* matches the IP address of the rhs argument.
*/
static bool
srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
switch (srcaddr->sa_family) {
case AF_UNSPEC:
return (rhs->sa_family == AF_UNSPEC);
case AF_INET: {
struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
}
case AF_INET6: {
struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
}
default:
WARN_ON(1);
return false; /* don't expect to be here */
}
}
/*
* If no port is specified in addr structure, we try to match with 445 port
* and if it fails - with 139 ports. It should be called only if address
* families of server and addr are equal.
*/
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
__be16 port, *sport;
switch (addr->sa_family) {
case AF_INET:
sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
port = ((struct sockaddr_in *) addr)->sin_port;
break;
case AF_INET6:
sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
port = ((struct sockaddr_in6 *) addr)->sin6_port;
break;
default:
WARN_ON(1);
return false;
}
if (!port) {
port = htons(CIFS_PORT);
if (port == *sport)
return true;
port = htons(RFC1001_PORT);
}
return port == *sport;
}
static bool
match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
struct sockaddr *srcaddr)
{
switch (addr->sa_family) {
case AF_INET: {
struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
struct sockaddr_in *srv_addr4 =
(struct sockaddr_in *)&server->dstaddr;
if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
return false;
break;
}
case AF_INET6: {
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
struct sockaddr_in6 *srv_addr6 =
(struct sockaddr_in6 *)&server->dstaddr;
if (!ipv6_addr_equal(&addr6->sin6_addr,
&srv_addr6->sin6_addr))
return false;
if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
return false;
break;
}
default:
WARN_ON(1);
return false; /* don't expect to be here */
}
if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
return false;
return true;
}
static bool
match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
{
unsigned int secFlags;
if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
secFlags = vol->secFlg;
else
secFlags = global_secflags | vol->secFlg;
switch (server->secType) {
case LANMAN:
if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT)))
return false;
break;
case NTLMv2:
if (!(secFlags & CIFSSEC_MAY_NTLMV2))
return false;
break;
case NTLM:
if (!(secFlags & CIFSSEC_MAY_NTLM))
return false;
break;
case Kerberos:
if (!(secFlags & CIFSSEC_MAY_KRB5))
return false;
break;
case RawNTLMSSP:
if (!(secFlags & CIFSSEC_MAY_NTLMSSP))
return false;
break;
default:
/* shouldn't happen */
return false;
}
/* now check if signing mode is acceptable */
if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
(server->sec_mode & SECMODE_SIGN_REQUIRED))
return false;
else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
(server->sec_mode &
(SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
return false;
return true;
}
static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
{
struct sockaddr *addr = (struct sockaddr *)&vol->dstaddr;
if ((server->vals != vol->vals) || (server->ops != vol->ops))
return 0;
if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
return 0;
if (!match_address(server, addr,
(struct sockaddr *)&vol->srcaddr))
return 0;
if (!match_port(server, addr))
return 0;
if (!match_security(server, vol))
return 0;
return 1;
}
static struct TCP_Server_Info *
cifs_find_tcp_session(struct smb_vol *vol)
{
struct TCP_Server_Info *server;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (!match_server(server, vol))
continue;
++server->srv_count;
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "Existing tcp session with server found\n");
return server;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
static void
cifs_put_tcp_session(struct TCP_Server_Info *server)
{
struct task_struct *task;
spin_lock(&cifs_tcp_ses_lock);
if (--server->srv_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
put_net(cifs_net_ns(server));
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
cifs_crypto_shash_release(server);
cifs_fscache_release_client_cookie(server);
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
task = xchg(&server->tsk, NULL);
if (task)
force_sig(SIGKILL, task);
}
static struct TCP_Server_Info *
cifs_get_tcp_session(struct smb_vol *volume_info)
{
struct TCP_Server_Info *tcp_ses = NULL;
int rc;
cifs_dbg(FYI, "UNC: %s\n", volume_info->UNC);
/* see if we already have a matching tcp_ses */
tcp_ses = cifs_find_tcp_session(volume_info);
if (tcp_ses)
return tcp_ses;
tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
if (!tcp_ses) {
rc = -ENOMEM;
goto out_err;
}
rc = cifs_crypto_shash_allocate(tcp_ses);
if (rc) {
cifs_dbg(VFS, "could not setup hash structures rc %d\n", rc);
goto out_err;
}
tcp_ses->ops = volume_info->ops;
tcp_ses->vals = volume_info->vals;
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->hostname = extract_hostname(volume_info->UNC);
if (IS_ERR(tcp_ses->hostname)) {
rc = PTR_ERR(tcp_ses->hostname);
goto out_err_crypto_release;
}
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
tcp_ses->in_flight = 0;
tcp_ses->credits = 1;
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
mutex_init(&tcp_ses->srv_mutex);
memcpy(tcp_ses->workstation_RFC1001_name,
volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
memcpy(tcp_ses->server_RFC1001_name,
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
tcp_ses->lstrp = jiffies;
spin_lock_init(&tcp_ses->req_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
sizeof(tcp_ses->dstaddr));
/*
* at this point we are the only ones with the pointer
* to the struct since the kernel thread not created yet
* no need to spinlock this init of tcpStatus or srv_count
*/
tcp_ses->tcpStatus = CifsNew;
++tcp_ses->srv_count;
rc = ip_connect(tcp_ses);
if (rc < 0) {
cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
goto out_err_crypto_release;
}
/*
* since we're in a cifs function already, we know that
* this will succeed. No need for try_module_get().
*/
__module_get(THIS_MODULE);
tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
tcp_ses, "cifsd");
if (IS_ERR(tcp_ses->tsk)) {
rc = PTR_ERR(tcp_ses->tsk);
cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
module_put(THIS_MODULE);
goto out_err_crypto_release;
}
tcp_ses->tcpStatus = CifsNeedNegotiate;
/* thread spawned, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cifs_fscache_get_client_cookie(tcp_ses);
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
return tcp_ses;
out_err_crypto_release:
cifs_crypto_shash_release(tcp_ses);
put_net(cifs_net_ns(tcp_ses));
out_err:
if (tcp_ses) {
if (!IS_ERR(tcp_ses->hostname))
kfree(tcp_ses->hostname);
if (tcp_ses->ssocket)
sock_release(tcp_ses->ssocket);
kfree(tcp_ses);
}
return ERR_PTR(rc);
}
static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
{
switch (ses->server->secType) {
case Kerberos:
if (!uid_eq(vol->cred_uid, ses->cred_uid))
return 0;
break;
default:
/* NULL username means anonymous session */
if (ses->user_name == NULL) {
if (!vol->nullauth)
return 0;
break;
}
/* anything else takes username/password */
if (strncmp(ses->user_name,
vol->username ? vol->username : "",
MAX_USERNAME_SIZE))
return 0;
if (strlen(vol->username) != 0 &&
ses->password != NULL &&
strncmp(ses->password,
vol->password ? vol->password : "",
MAX_PASSWORD_SIZE))
return 0;
}
return 1;
}
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
struct cifs_ses *ses;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (!match_session(ses, vol))
continue;
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
static void
cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int xid;
struct TCP_Server_Info *server = ses->server;
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
spin_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
if (ses->status == CifsGood && server->ops->logoff) {
xid = get_xid();
server->ops->logoff(xid, ses);
_free_xid(xid);
}
sesInfoFree(ses);
cifs_put_tcp_session(server);
}
#ifdef CONFIG_KEYS
/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
/* Populate username and pw fields from keyring if possible */
static int
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
char *desc, *delim, *payload;
ssize_t len;
struct key *key;
struct TCP_Server_Info *server = ses->server;
struct sockaddr_in *sa;
struct sockaddr_in6 *sa6;
struct user_key_payload *upayload;
desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
if (!desc)
return -ENOMEM;
/* try to find an address key first */
switch (server->dstaddr.ss_family) {
case AF_INET:
sa = (struct sockaddr_in *)&server->dstaddr;
sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
break;
case AF_INET6:
sa6 = (struct sockaddr_in6 *)&server->dstaddr;
sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
break;
default:
cifs_dbg(FYI, "Bad ss_family (%hu)\n",
server->dstaddr.ss_family);
rc = -EINVAL;
goto out_err;
}
cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
key = request_key(&key_type_logon, desc, "");
if (IS_ERR(key)) {
if (!ses->domainName) {
cifs_dbg(FYI, "domainName is NULL\n");
rc = PTR_ERR(key);
goto out_err;
}
/* didn't work, try to find a domain key */
sprintf(desc, "cifs:d:%s", ses->domainName);
cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
key = request_key(&key_type_logon, desc, "");
if (IS_ERR(key)) {
rc = PTR_ERR(key);
goto out_err;
}
}
down_read(&key->sem);
upayload = key->payload.data;
if (IS_ERR_OR_NULL(upayload)) {
rc = upayload ? PTR_ERR(upayload) : -EINVAL;
goto out_key_put;
}
/* find first : in payload */
payload = (char *)upayload->data;
delim = strnchr(payload, upayload->datalen, ':');
cifs_dbg(FYI, "payload=%s\n", payload);
if (!delim) {
cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
upayload->datalen);
rc = -EINVAL;
goto out_key_put;
}
len = delim - payload;
if (len > MAX_USERNAME_SIZE || len <= 0) {
cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
len);
rc = -EINVAL;
goto out_key_put;
}
vol->username = kstrndup(payload, len, GFP_KERNEL);
if (!vol->username) {
cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
len);
rc = -ENOMEM;
goto out_key_put;
}
cifs_dbg(FYI, "%s: username=%s\n", __func__, vol->username);
len = key->datalen - (len + 1);
if (len > MAX_PASSWORD_SIZE || len <= 0) {
cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
rc = -EINVAL;
kfree(vol->username);
vol->username = NULL;
goto out_key_put;
}
++delim;
vol->password = kstrndup(delim, len, GFP_KERNEL);
if (!vol->password) {
cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
len);
rc = -ENOMEM;
kfree(vol->username);
vol->username = NULL;
goto out_key_put;
}
out_key_put:
up_read(&key->sem);
key_put(key);
out_err:
kfree(desc);
cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
return rc;
}
#else /* ! CONFIG_KEYS */
static inline int
cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
struct cifs_ses *ses __attribute__((unused)))
{
return -ENOSYS;
}
#endif /* CONFIG_KEYS */
static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM;
unsigned int xid;
struct cifs_ses *ses;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
xid = get_xid();
ses = cifs_find_smb_ses(server, volume_info);
if (ses) {
cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
ses->status);
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our ses reference */
cifs_put_smb_ses(ses);
free_xid(xid);
return ERR_PTR(rc);
}
if (ses->need_reconnect) {
cifs_dbg(FYI, "Session needs reconnect\n");
rc = cifs_setup_session(xid, ses,
volume_info->local_nls);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our reference */
cifs_put_smb_ses(ses);
free_xid(xid);
return ERR_PTR(rc);
}
}
mutex_unlock(&ses->session_mutex);
/* existing SMB ses has a server reference already */
cifs_put_tcp_session(server);
free_xid(xid);
return ses;
}
cifs_dbg(FYI, "Existing smb sess not found\n");
ses = sesInfoAlloc();
if (ses == NULL)
goto get_ses_fail;
/* new SMB session uses our server ref */
ses->server = server;
if (server->dstaddr.ss_family == AF_INET6)
sprintf(ses->serverName, "%pI6", &addr6->sin6_addr);
else
sprintf(ses->serverName, "%pI4", &addr->sin_addr);
if (volume_info->username) {
ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
if (!ses->user_name)
goto get_ses_fail;
}
/* volume_info->password freed at unmount */
if (volume_info->password) {
ses->password = kstrdup(volume_info->password, GFP_KERNEL);
if (!ses->password)
goto get_ses_fail;
}
if (volume_info->domainname) {
ses->domainName = kstrdup(volume_info->domainname, GFP_KERNEL);
if (!ses->domainName)
goto get_ses_fail;
}
ses->cred_uid = volume_info->cred_uid;
ses->linux_uid = volume_info->linux_uid;
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
/* success, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
free_xid(xid);
return ses;
get_ses_fail:
sesInfoFree(ses);
free_xid(xid);
return ERR_PTR(rc);
}
static int match_tcon(struct cifs_tcon *tcon, const char *unc)
{
if (tcon->tidStatus == CifsExiting)
return 0;
if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
return 0;
return 1;
}
static struct cifs_tcon *
cifs_find_tcon(struct cifs_ses *ses, const char *unc)
{
struct list_head *tmp;
struct cifs_tcon *tcon;
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
if (!match_tcon(tcon, unc))
continue;
++tcon->tc_count;
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
static void
cifs_put_tcon(struct cifs_tcon *tcon)
{
unsigned int xid;
struct cifs_ses *ses = tcon->ses;
cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&tcon->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
xid = get_xid();
if (ses->server->ops->tree_disconnect)
ses->server->ops->tree_disconnect(xid, tcon);
_free_xid(xid);
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon);
cifs_put_smb_ses(ses);
}
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
int rc, xid;
struct cifs_tcon *tcon;
tcon = cifs_find_tcon(ses, volume_info->UNC);
if (tcon) {
cifs_dbg(FYI, "Found match on UNC path\n");
/* existing tcon already has a reference */
cifs_put_smb_ses(ses);
if (tcon->seal != volume_info->seal)
cifs_dbg(VFS, "transport encryption setting conflicts with existing tid\n");
return tcon;
}
if (!ses->server->ops->tree_connect) {
rc = -ENOSYS;
goto out_fail;
}
tcon = tconInfoAlloc();
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
}
tcon->ses = ses;
if (volume_info->password) {
tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
if (!tcon->password) {
rc = -ENOMEM;
goto out_fail;
}
}
/*
* BB Do we need to wrap session_mutex around this TCon call and Unix
* SetFS as we do on SessSetup and reconnect?
*/
xid = get_xid();
rc = ses->server->ops->tree_connect(xid, ses, volume_info->UNC, tcon,
volume_info->local_nls);
free_xid(xid);
cifs_dbg(FYI, "Tcon rc = %d\n", rc);
if (rc)
goto out_fail;
if (volume_info->nodfs) {
tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags);
}
tcon->seal = volume_info->seal;
/*
* We can have only one retry value for a connection to a share so for
* resources mounted more than once to the same server share the last
* value passed in for the retry flag is used.
*/
tcon->retry = volume_info->retry;
tcon->nocase = volume_info->nocase;
tcon->local_lease = volume_info->local_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
cifs_fscache_get_super_cookie(tcon);
return tcon;
out_fail:
tconInfoFree(tcon);
return ERR_PTR(rc);
}
void
cifs_put_tlink(struct tcon_link *tlink)
{
if (!tlink || IS_ERR(tlink))
return;
if (!atomic_dec_and_test(&tlink->tl_count) ||
test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
tlink->tl_time = jiffies;
return;
}
if (!IS_ERR(tlink_tcon(tlink)))
cifs_put_tcon(tlink_tcon(tlink));
kfree(tlink);
return;
}
static inline struct tcon_link *
cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
{
return cifs_sb->master_tlink;
}
static int
compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
return 0;
if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
(new->mnt_cifs_flags & CIFS_MOUNT_MASK))
return 0;
/*
* We want to share sb only if we don't specify an r/wsize or
* specified r/wsize is greater than or equal to existing one.
*/
if (new->wsize && new->wsize < old->wsize)
return 0;
if (new->rsize && new->rsize < old->rsize)
return 0;
if (!uid_eq(old->mnt_uid, new->mnt_uid) || !gid_eq(old->mnt_gid, new->mnt_gid))
return 0;
if (old->mnt_file_mode != new->mnt_file_mode ||
old->mnt_dir_mode != new->mnt_dir_mode)
return 0;
if (strcmp(old->local_nls->charset, new->local_nls->charset))
return 0;
if (old->actimeo != new->actimeo)
return 0;
return 1;
}
int
cifs_match_super(struct super_block *sb, void *data)
{
struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
struct smb_vol *volume_info;
struct cifs_sb_info *cifs_sb;
struct TCP_Server_Info *tcp_srv;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct tcon_link *tlink;
int rc = 0;
spin_lock(&cifs_tcp_ses_lock);
cifs_sb = CIFS_SB(sb);
tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
if (IS_ERR(tlink)) {
spin_unlock(&cifs_tcp_ses_lock);
return rc;
}
tcon = tlink_tcon(tlink);
ses = tcon->ses;
tcp_srv = ses->server;
volume_info = mnt_data->vol;
if (!match_server(tcp_srv, volume_info) ||
!match_session(ses, volume_info) ||
!match_tcon(tcon, volume_info->UNC)) {
rc = 0;
goto out;
}
rc = compare_mount_options(sb, mnt_data);
out:
spin_unlock(&cifs_tcp_ses_lock);
cifs_put_tlink(tlink);
return rc;
}
int
get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *num_referrals,
struct dfs_info3_param **referrals, int remap)
{
char *temp_unc;
int rc = 0;
if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer)
return -ENOSYS;
*num_referrals = 0;
*referrals = NULL;
if (ses->ipc_tid == 0) {
temp_unc = kmalloc(2 /* for slashes */ +
strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2)
+ 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL);
if (temp_unc == NULL)
return -ENOMEM;
temp_unc[0] = '\\';
temp_unc[1] = '\\';
strcpy(temp_unc + 2, ses->serverName);
strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$");
rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL,
nls_codepage);
cifs_dbg(FYI, "Tcon rc = %d ipc_tid = %d\n", rc, ses->ipc_tid);
kfree(temp_unc);
}
if (rc == 0)
rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
referrals, num_referrals,
nls_codepage, remap);
/*
* BB - map targetUNCs to dfs_info3 structures, here or in
* ses->server->ops->get_dfs_refer.
*/
return rc;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key cifs_key[2];
static struct lock_class_key cifs_slock_key[2];
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
struct sock *sk = sock->sk;
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}
static inline void
cifs_reclassify_socket6(struct socket *sock)
{
struct sock *sk = sock->sk;
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
#else
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
}
static inline void
cifs_reclassify_socket6(struct socket *sock)
{
}
#endif
/* See RFC1001 section 14 on representation of Netbios names */
static void rfc1002mangle(char *target, char *source, unsigned int length)
{
unsigned int i, j;
for (i = 0, j = 0; i < (length); i++) {
/* mask a nibble at a time and encode */
target[j] = 'A' + (0x0F & (source[i] >> 4));
target[j+1] = 'A' + (0x0F & source[i]);
j += 2;
}
}
static int
bind_socket(struct TCP_Server_Info *server)
{
int rc = 0;
if (server->srcaddr.ss_family != AF_UNSPEC) {
/* Bind to the specified local IP address */
struct socket *socket = server->ssocket;
rc = socket->ops->bind(socket,
(struct sockaddr *) &server->srcaddr,
sizeof(server->srcaddr));
if (rc < 0) {
struct sockaddr_in *saddr4;
struct sockaddr_in6 *saddr6;
saddr4 = (struct sockaddr_in *)&server->srcaddr;
saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
if (saddr6->sin6_family == AF_INET6)
cifs_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
&saddr6->sin6_addr, rc);
else
cifs_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
&saddr4->sin_addr.s_addr, rc);
}
}
return rc;
}
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
int rc = 0;
/*
* some servers require RFC1001 sessinit before sending
* negprot - BB check reconnection in case where second
* sessinit is sent but no second negprot
*/
struct rfc1002_session_packet *ses_init_buf;
struct smb_hdr *smb_buf;
ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
GFP_KERNEL);
if (ses_init_buf) {
ses_init_buf->trailer.session_req.called_len = 32;
if (server->server_RFC1001_name &&
server->server_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.called_name,
server->server_RFC1001_name,
RFC1001_NAME_LEN_WITH_NULL);
else
rfc1002mangle(ses_init_buf->trailer.
session_req.called_name,
DEFAULT_CIFS_CALLED_NAME,
RFC1001_NAME_LEN_WITH_NULL);
ses_init_buf->trailer.session_req.calling_len = 32;
/*
* calling name ends in null (byte 16) from old smb
* convention.
*/
if (server->workstation_RFC1001_name &&
server->workstation_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name,
server->workstation_RFC1001_name,
RFC1001_NAME_LEN_WITH_NULL);
else
rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name,
"LINUX_CIFS_CLNT",
RFC1001_NAME_LEN_WITH_NULL);
ses_init_buf->trailer.session_req.scope1 = 0;
ses_init_buf->trailer.session_req.scope2 = 0;
smb_buf = (struct smb_hdr *)ses_init_buf;
/* sizeof RFC1002_SESSION_REQUEST with no scope */
smb_buf->smb_buf_length = cpu_to_be32(0x81000044);
rc = smb_send(server, smb_buf, 0x44);
kfree(ses_init_buf);
/*
* RFC1001 layer in at least one server
* requires very short break before negprot
* presumably because not expecting negprot
* to follow so fast. This is a simple
* solution that works without
* complicating the code and causes no
* significant slowing down on mount
* for everyone else
*/
usleep_range(1000, 2000);
}
/*
* else the negprot may still work without this
* even though malloc failed
*/
return rc;
}
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
int rc = 0;
__be16 sport;
int slen, sfamily;
struct socket *socket = server->ssocket;
struct sockaddr *saddr;
saddr = (struct sockaddr *) &server->dstaddr;
if (server->dstaddr.ss_family == AF_INET6) {
sport = ((struct sockaddr_in6 *) saddr)->sin6_port;
slen = sizeof(struct sockaddr_in6);
sfamily = AF_INET6;
} else {
sport = ((struct sockaddr_in *) saddr)->sin_port;
slen = sizeof(struct sockaddr_in);
sfamily = AF_INET;
}
if (socket == NULL) {
rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
IPPROTO_TCP, &socket, 1);
if (rc < 0) {
cifs_dbg(VFS, "Error %d creating socket\n", rc);
server->ssocket = NULL;
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
server->ssocket = socket;
socket->sk->sk_allocation = GFP_NOFS;
if (sfamily == AF_INET6)
cifs_reclassify_socket6(socket);
else
cifs_reclassify_socket4(socket);
}
rc = bind_socket(server);
if (rc < 0)
return rc;
/*
* Eventually check for other socket options to change from
* the default. sock_setsockopt not used because it expects
* user space buffer
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
socket->sk->sk_sndtimeo = 5 * HZ;
/* make the bufsizes depend on wsize/rsize and max requests */
if (server->noautotune) {
if (socket->sk->sk_sndbuf < (200 * 1024))
socket->sk->sk_sndbuf = 200 * 1024;
if (socket->sk->sk_rcvbuf < (140 * 1024))
socket->sk->sk_rcvbuf = 140 * 1024;
}
if (server->tcp_nodelay) {
int val = 1;
rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
if (rc)
cifs_dbg(FYI, "set TCP_NODELAY socket option error %d\n",
rc);
}
cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
rc = socket->ops->connect(socket, saddr, slen, 0);
if (rc < 0) {
cifs_dbg(FYI, "Error %d connecting to server\n", rc);
sock_release(socket);
server->ssocket = NULL;
return rc;
}
if (sport == htons(RFC1001_PORT))
rc = ip_rfc1001_connect(server);
return rc;
}
static int
ip_connect(struct TCP_Server_Info *server)
{
__be16 *sport;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
if (server->dstaddr.ss_family == AF_INET6)
sport = &addr6->sin6_port;
else
sport = &addr->sin_port;
if (*sport == 0) {
int rc;
/* try with 445 port at first */
*sport = htons(CIFS_PORT);
rc = generic_ip_connect(server);
if (rc >= 0)
return rc;
/* if it failed, try with 139 port */
*sport = htons(RFC1001_PORT);
}
return generic_ip_connect(server);
}
void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
{
/* if we are reconnecting then should we check to see if
* any requested capabilities changed locally e.g. via
* remount but we can not do much about it here
* if they have (even if we could detect it by the following)
* Perhaps we could add a backpointer to array of sb from tcon
* or if we change to make all sb to same share the same
* sb as NFS - then we only have one backpointer to sb.
* What if we wanted to mount the server share twice once with
* and once without posixacls or posix paths? */
__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
if (vol_info && vol_info->no_linux_ext) {
tcon->fsUnixInfo.Capability = 0;
tcon->unix_ext = 0; /* Unix Extensions disabled */
cifs_dbg(FYI, "Linux protocol extensions disabled\n");
return;
} else if (vol_info)
tcon->unix_ext = 1; /* Unix Extensions supported */
if (tcon->unix_ext == 0) {
cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
return;
}
if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
/* check for reconnect case in which we do not
want to change the mount behavior if we can avoid it */
if (vol_info == NULL) {
/* turn off POSIX ACL and PATHNAMES if not set
originally at mount time */
if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
cifs_dbg(VFS, "POSIXPATH support change\n");
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
cifs_dbg(VFS, "possible reconnect error\n");
cifs_dbg(VFS, "server disabled POSIX path support\n");
}
}
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
cifs_dbg(VFS, "per-share encryption not supported yet\n");
cap &= CIFS_UNIX_CAP_MASK;
if (vol_info && vol_info->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
cifs_dbg(FYI, "negotiated posix acl support\n");
if (cifs_sb)
cifs_sb->mnt_cifs_flags |=
CIFS_MOUNT_POSIXACL;
}
if (vol_info && vol_info->posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
cifs_dbg(FYI, "negotiate posix pathnames\n");
if (cifs_sb)
cifs_sb->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
}
cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
if (cap & CIFS_UNIX_FCNTL_CAP)
cifs_dbg(FYI, "FCNTL cap\n");
if (cap & CIFS_UNIX_EXTATTR_CAP)
cifs_dbg(FYI, "EXTATTR cap\n");
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
cifs_dbg(FYI, "POSIX path cap\n");
if (cap & CIFS_UNIX_XATTR_CAP)
cifs_dbg(FYI, "XATTR cap\n");
if (cap & CIFS_UNIX_POSIX_ACL_CAP)
cifs_dbg(FYI, "POSIX ACL cap\n");
if (cap & CIFS_UNIX_LARGE_READ_CAP)
cifs_dbg(FYI, "very large read cap\n");
if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
cifs_dbg(FYI, "very large write cap\n");
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
cifs_dbg(FYI, "transport encryption cap\n");
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
cifs_dbg(FYI, "mandatory transport encryption cap\n");
#endif /* CIFS_DEBUG2 */
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
if (vol_info == NULL) {
cifs_dbg(FYI, "resetting capabilities failed\n");
} else
cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
}
}
}
void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
spin_lock_init(&cifs_sb->tlink_tree_lock);
cifs_sb->tlink_tree = RB_ROOT;
/*
* Temporarily set r/wsize for matching superblock. If we end up using
* new sb then client will later negotiate it downward if needed.
*/
cifs_sb->rsize = pvolume_info->rsize;
cifs_sb->wsize = pvolume_info->wsize;
cifs_sb->mnt_uid = pvolume_info->linux_uid;
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
cifs_sb->local_nls = pvolume_info->local_nls;
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
if (pvolume_info->setuids)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
if (pvolume_info->server_ino)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
if (pvolume_info->remap)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
if (pvolume_info->no_xattr)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
if (pvolume_info->sfu_emul)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
if (pvolume_info->nobrl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
if (pvolume_info->nostrictsync)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
if (pvolume_info->mand_lock)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
if (pvolume_info->rwpidforward)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
if (pvolume_info->cifs_acl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
if (pvolume_info->backupuid_specified) {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
cifs_sb->mnt_backupuid = pvolume_info->backupuid;
}
if (pvolume_info->backupgid_specified) {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
cifs_sb->mnt_backupgid = pvolume_info->backupgid;
}
if (pvolume_info->override_uid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
if (pvolume_info->override_gid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
if (pvolume_info->dynperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
if (pvolume_info->fsc)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
if (pvolume_info->multiuser)
cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
CIFS_MOUNT_NO_PERM);
if (pvolume_info->strict_io)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
if (pvolume_info->direct_io) {
cifs_dbg(FYI, "mounting share using direct i/o\n");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
if (pvolume_info->mfsymlinks) {
if (pvolume_info->sfu_emul) {
cifs_dbg(VFS, "mount option mfsymlinks ignored if sfu mount option is used\n");
} else {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
}
}
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
}
static void
cleanup_volume_info_contents(struct smb_vol *volume_info)
{
kfree(volume_info->username);
kzfree(volume_info->password);
kfree(volume_info->UNC);
kfree(volume_info->domainname);
kfree(volume_info->iocharset);
kfree(volume_info->prepath);
}
void
cifs_cleanup_volume_info(struct smb_vol *volume_info)
{
if (!volume_info)
return;
cleanup_volume_info_contents(volume_info);
kfree(volume_info);
}
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* cifs_build_path_to_root returns full path to root when we do not have an
* exiting connection (tcon)
*/
static char *
build_unc_path_to_root(const struct smb_vol *vol,
const struct cifs_sb_info *cifs_sb)
{
char *full_path, *pos;
unsigned int pplen = vol->prepath ? strlen(vol->prepath) + 1 : 0;
unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
strncpy(full_path, vol->UNC, unc_len);
pos = full_path + unc_len;
if (pplen) {
*pos = CIFS_DIR_SEP(cifs_sb);
strncpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
*pos = '\0'; /* add trailing null */
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
return full_path;
}
/*
* Perform a dfs referral query for a share and (optionally) prefix
*
* If a referral is found, cifs_sb->mountdata will be (re-)allocated
* to a string containing updated options for the submount. Otherwise it
* will be left untouched.
*
* Returns the rc from get_dfs_path to the caller, which can be used to
* determine whether there were referrals.
*/
static int
expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
int check_prefix)
{
int rc;
unsigned int num_referrals = 0;
struct dfs_info3_param *referrals = NULL;
char *full_path = NULL, *ref_path = NULL, *mdata = NULL;
full_path = build_unc_path_to_root(volume_info, cifs_sb);
if (IS_ERR(full_path))
return PTR_ERR(full_path);
/* For DFS paths, skip the first '\' of the UNC */
ref_path = check_prefix ? full_path + 1 : volume_info->UNC + 1;
rc = get_dfs_path(xid, ses, ref_path, cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (!rc && num_referrals > 0) {
char *fake_devname = NULL;
mdata = cifs_compose_mount_options(cifs_sb->mountdata,
full_path + 1, referrals,
&fake_devname);
free_dfs_info_array(referrals, num_referrals);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
} else {
cleanup_volume_info_contents(volume_info);
rc = cifs_setup_volume_info(volume_info, mdata,
fake_devname);
}
kfree(fake_devname);
kfree(cifs_sb->mountdata);
cifs_sb->mountdata = mdata;
}
kfree(full_path);
return rc;
}
#endif
static int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname)
{
int rc = 0;
if (cifs_parse_mount_options(mount_data, devname, volume_info))
return -EINVAL;
if (volume_info->nullauth) {
cifs_dbg(FYI, "Anonymous login\n");
kfree(volume_info->username);
volume_info->username = NULL;
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
cifs_dbg(FYI, "Username: %s\n", volume_info->username);
} else {
cifs_dbg(VFS, "No username specified\n");
/* In userspace mount helper we can get user name from alternate
locations such as env variables and files on disk */
return -EINVAL;
}
/* this is needed for ASCII cp to Unicode converts */
if (volume_info->iocharset == NULL) {
/* load_nls_default cannot return null */
volume_info->local_nls = load_nls_default();
} else {
volume_info->local_nls = load_nls(volume_info->iocharset);
if (volume_info->local_nls == NULL) {
cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
volume_info->iocharset);
return -ELIBACC;
}
}
return rc;
}
struct smb_vol *
cifs_get_volume_info(char *mount_data, const char *devname)
{
int rc;
struct smb_vol *volume_info;
volume_info = kmalloc(sizeof(struct smb_vol), GFP_KERNEL);
if (!volume_info)
return ERR_PTR(-ENOMEM);
rc = cifs_setup_volume_info(volume_info, mount_data, devname);
if (rc) {
cifs_cleanup_volume_info(volume_info);
volume_info = ERR_PTR(rc);
}
return volume_info;
}
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
int rc;
unsigned int xid;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
char *full_path;
struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
int referral_walks_count = 0;
#endif
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
if (rc)
return rc;
#ifdef CONFIG_CIFS_DFS_UPCALL
try_mount_again:
/* cleanup activities if we're chasing a referral */
if (referral_walks_count) {
if (tcon)
cifs_put_tcon(tcon);
else if (ses)
cifs_put_smb_ses(ses);
free_xid(xid);
}
#endif
rc = 0;
tcon = NULL;
ses = NULL;
server = NULL;
full_path = NULL;
tlink = NULL;
xid = get_xid();
/* get a reference to a tcp session */
server = cifs_get_tcp_session(volume_info);
if (IS_ERR(server)) {
rc = PTR_ERR(server);
bdi_destroy(&cifs_sb->bdi);
goto out;
}
/* get a reference to a SMB session */
ses = cifs_get_smb_ses(server, volume_info);
if (IS_ERR(ses)) {
rc = PTR_ERR(ses);
ses = NULL;
goto mount_fail_check;
}
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(ses, volume_info);
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
tcon = NULL;
goto remote_path_check;
}
/* tell server which Unix caps we support */
if (cap_unix(tcon->ses)) {
/* reset of caps checks mount to see if unix extensions
disabled for just this mount */
reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
rc = -EACCES;
goto mount_fail_check;
}
} else
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
if (!tcon->ipc && server->ops->qfs_tcon)
server->ops->qfs_tcon(xid, tcon);
cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info);
cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
/* tune readahead according to rsize */
cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* Perform an unconditional check for whether there are DFS
* referrals for this path without prefix, to provide support
* for DFS referrals from w2k8 servers which don't seem to respond
* with PATH_NOT_COVERED to requests that include the prefix.
* Chase the referral if found, otherwise continue normally.
*/
if (referral_walks_count == 0) {
int refrc = expand_dfs_referral(xid, ses, volume_info, cifs_sb,
false);
if (!refrc) {
referral_walks_count++;
goto try_mount_again;
}
}
#endif
/* check if a whole path is not remote */
if (!rc && tcon) {
if (!server->ops->is_path_accessible) {
rc = -ENOSYS;
goto mount_fail_check;
}
/*
* cifs_build_path_to_root works only when we have a valid tcon
*/
full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon);
if (full_path == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
full_path);
if (rc != 0 && rc != -EREMOTE) {
kfree(full_path);
goto mount_fail_check;
}
kfree(full_path);
}
/* get referral if needed */
if (rc == -EREMOTE) {
#ifdef CONFIG_CIFS_DFS_UPCALL
if (referral_walks_count > MAX_NESTED_LINKS) {
/*
* BB: when we implement proper loop detection,
* we will remove this check. But now we need it
* to prevent an indefinite loop if 'DFS tree' is
* misconfigured (i.e. has loops).
*/
rc = -ELOOP;
goto mount_fail_check;
}
rc = expand_dfs_referral(xid, ses, volume_info, cifs_sb, true);
if (!rc) {
referral_walks_count++;
goto try_mount_again;
}
goto mount_fail_check;
#else /* No DFS support, return error on mount */
rc = -EOPNOTSUPP;
#endif
}
if (rc)
goto mount_fail_check;
/* now, hang the tcon off of the superblock */
tlink = kzalloc(sizeof *tlink, GFP_KERNEL);
if (tlink == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
tlink->tl_uid = ses->linux_uid;
tlink->tl_tcon = tcon;
tlink->tl_time = jiffies;
set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
cifs_sb->master_tlink = tlink;
spin_lock(&cifs_sb->tlink_tree_lock);
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
mount_fail_check:
/* on error free sesinfo and tcon struct if needed */
if (rc) {
/* If find_unc succeeded then rc == 0 so we can not end */
/* up accidentally freeing someone elses tcon struct */
if (tcon)
cifs_put_tcon(tcon);
else if (ses)
cifs_put_smb_ses(ses);
else
cifs_put_tcp_session(server);
bdi_destroy(&cifs_sb->bdi);
}
out:
free_xid(xid);
return rc;
}
/*
* Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon
* pointer may be NULL.
*/
int
CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
const char *tree, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
struct smb_hdr *smb_buffer_response;
TCONX_REQ *pSMB;
TCONX_RSP *pSMBr;
unsigned char *bcc_ptr;
int rc = 0;
int length;
__u16 bytes_left, count;
if (ses == NULL)
return -EIO;
smb_buffer = cifs_buf_get();
if (smb_buffer == NULL)
return -ENOMEM;
smb_buffer_response = smb_buffer;
header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
NULL /*no tid */ , 4 /*wct */ );
smb_buffer->Mid = get_next_mid(ses->server);
smb_buffer->Uid = ses->Suid;
pSMB = (TCONX_REQ *) smb_buffer;
pSMBr = (TCONX_RSP *) smb_buffer_response;
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
/* already aligned so no need to do it below */
} else {
pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
weaker LANMAN (which we do not send by default) is accepted
by Samba (not sure whether other servers allow
NTLMv2 password here) */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptkey,
ses->server->sec_mode &
SECMODE_PW_ENCRYPT ? true : false,
bcc_ptr);
else
#endif /* CIFS_WEAK_PW_HASH */
rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
bcc_ptr, nls_codepage);
bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {
/* must align unicode strings */
*bcc_ptr = 0; /* null byte password */
bcc_ptr++;
}
}
if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_STATUS32) {
smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
}
if (ses->capabilities & CAP_DFS) {
smb_buffer->Flags2 |= SMBFLG2_DFS;
}
if (ses->capabilities & CAP_UNICODE) {
smb_buffer->Flags2 |= SMBFLG2_UNICODE;
length =
cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
6 /* max utf8 char length in bytes */ *
(/* server len*/ + 256 /* share len */), nls_codepage);
bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
bcc_ptr += 2; /* skip trailing null */
} else { /* ASCII */
strcpy(bcc_ptr, tree);
bcc_ptr += strlen(tree) + 1;
}
strcpy(bcc_ptr, "?????");
bcc_ptr += strlen("?????");
bcc_ptr += 1;
count = bcc_ptr - &pSMB->Password[0];
pSMB->hdr.smb_buf_length = cpu_to_be32(be32_to_cpu(
pSMB->hdr.smb_buf_length) + count);
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
0);
/* above now done in SendReceive */
if ((rc == 0) && (tcon != NULL)) {
bool is_unicode;
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
bytes_left = get_bcc(smb_buffer_response);
length = strnlen(bcc_ptr, bytes_left - 2);
if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
else
is_unicode = false;
/* skip service field (NB: this field is always ASCII) */
if (length == 3) {
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
cifs_dbg(FYI, "IPC connection\n");
tcon->ipc = 1;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
/* the most common case */
cifs_dbg(FYI, "disk share connection\n");
}
}
bcc_ptr += length + 1;
bytes_left -= (length + 1);
strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
/* mostly informational -- no need to fail on error here */
kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
bytes_left, is_unicode,
nls_codepage);
cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
if ((smb_buffer_response->WordCount == 3) ||
(smb_buffer_response->WordCount == 7))
/* field is in same location */
tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
else
tcon->Flags = 0;
cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
ses->ipc_tid = smb_buffer_response->Tid;
}
cifs_buf_release(smb_buffer);
return rc;
}
void
cifs_umount(struct cifs_sb_info *cifs_sb)
{
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct tcon_link *tlink;
cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
spin_lock(&cifs_sb->tlink_tree_lock);
while ((node = rb_first(root))) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(node, root);
spin_unlock(&cifs_sb->tlink_tree_lock);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb->mountdata);
unload_nls(cifs_sb->local_nls);
kfree(cifs_sb);
}
int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
if (!server->ops->need_neg || !server->ops->negotiate)
return -ENOSYS;
/* only send once per connect */
if (!server->ops->need_neg(server))
return 0;
set_credits(server, 1);
rc = server->ops->negotiate(xid, ses);
if (rc == 0) {
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus == CifsNeedNegotiate)
server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
spin_unlock(&GlobalMid_Lock);
}
return rc;
}
int
cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info)
{
int rc = -ENOSYS;
struct TCP_Server_Info *server = ses->server;
ses->flags = 0;
ses->capabilities = server->capabilities;
if (linuxExtEnabled == 0)
ses->capabilities &= (~server->vals->cap_unix);
cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
server->sec_mode, server->capabilities, server->timeAdj);
if (server->ops->sess_setup)
rc = server->ops->sess_setup(xid, ses, nls_info);
if (rc) {
cifs_dbg(VFS, "Send error in SessSetup = %d\n", rc);
} else {
mutex_lock(&server->srv_mutex);
if (!server->session_estab) {
server->session_key.response = ses->auth_key.response;
server->session_key.len = ses->auth_key.len;
server->sequence_number = 0x2;
server->session_estab = true;
ses->auth_key.response = NULL;
}
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "CIFS Session Established successfully\n");
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
}
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
ses->auth_key.len = 0;
kfree(ses->ntlmssp);
ses->ntlmssp = NULL;
return rc;
}
static int
cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses)
{
switch (ses->server->secType) {
case Kerberos:
vol->secFlg = CIFSSEC_MUST_KRB5;
return 0;
case NTLMv2:
vol->secFlg = CIFSSEC_MUST_NTLMV2;
break;
case NTLM:
vol->secFlg = CIFSSEC_MUST_NTLM;
break;
case RawNTLMSSP:
vol->secFlg = CIFSSEC_MUST_NTLMSSP;
break;
case LANMAN:
vol->secFlg = CIFSSEC_MUST_LANMAN;
break;
}
return cifs_set_cifscreds(vol, ses);
}
static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
struct cifs_ses *ses;
struct cifs_tcon *tcon = NULL;
struct smb_vol *vol_info;
vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
if (vol_info == NULL)
return ERR_PTR(-ENOMEM);
vol_info->local_nls = cifs_sb->local_nls;
vol_info->linux_uid = fsuid;
vol_info->cred_uid = fsuid;
vol_info->UNC = master_tcon->treeName;
vol_info->retry = master_tcon->retry;
vol_info->nocase = master_tcon->nocase;
vol_info->local_lease = master_tcon->local_lease;
vol_info->no_linux_ext = !master_tcon->unix_ext;
rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
if (rc) {
tcon = ERR_PTR(rc);
goto out;
}
/* get a reference for the same TCP session */
spin_lock(&cifs_tcp_ses_lock);
++master_tcon->ses->server->srv_count;
spin_unlock(&cifs_tcp_ses_lock);
ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
if (IS_ERR(ses)) {
tcon = (struct cifs_tcon *)ses;
cifs_put_tcp_session(master_tcon->ses->server);
goto out;
}
tcon = cifs_get_tcon(ses, vol_info);
if (IS_ERR(tcon)) {
cifs_put_smb_ses(ses);
goto out;
}
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
out:
kfree(vol_info->username);
kfree(vol_info->password);
kfree(vol_info);
return tcon;
}
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
}
static int
cifs_sb_tcon_pending_wait(void *unused)
{
schedule();
return signal_pending(current) ? -ERESTARTSYS : 0;
}
/* find and return a tlink with given uid */
static struct tcon_link *
tlink_rb_search(struct rb_root *root, kuid_t uid)
{
struct rb_node *node = root->rb_node;
struct tcon_link *tlink;
while (node) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
if (uid_gt(tlink->tl_uid, uid))
node = node->rb_left;
else if (uid_lt(tlink->tl_uid, uid))
node = node->rb_right;
else
return tlink;
}
return NULL;
}
/* insert a tcon_link into the tree */
static void
tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct tcon_link *tlink;
while (*new) {
tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
parent = *new;
if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
rb_link_node(&new_tlink->tl_rbnode, parent, new);
rb_insert_color(&new_tlink->tl_rbnode, root);
}
/*
* Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
* current task.
*
* If the superblock doesn't refer to a multiuser mount, then just return
* the master tcon for the mount.
*
* First, search the rbtree for an existing tcon for this fsuid. If one
* exists, then check to see if it's pending construction. If it is then wait
* for construction to complete. Once it's no longer pending, check to see if
* it failed and either return an error or retry construction, depending on
* the timeout.
*
* If one doesn't exist then insert a new tcon_link struct into the tree and
* try to construct a new one.
*/
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
int ret;
kuid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
spin_lock(&cifs_sb->tlink_tree_lock);
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink)
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
if (tlink == NULL) {
newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
if (newtlink == NULL)
return ERR_PTR(-ENOMEM);
newtlink->tl_uid = fsuid;
newtlink->tl_tcon = ERR_PTR(-EACCES);
set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
cifs_get_tlink(newtlink);
spin_lock(&cifs_sb->tlink_tree_lock);
/* was one inserted after previous search? */
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink) {
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
kfree(newtlink);
goto wait_for_construction;
}
tlink = newtlink;
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
cifs_sb_tcon_pending_wait,
TASK_INTERRUPTIBLE);
if (ret) {
cifs_put_tlink(tlink);
return ERR_PTR(ret);
}
/* if it's good, return it */
if (!IS_ERR(tlink->tl_tcon))
return tlink;
/* return error if we tried this already recently */
if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
cifs_put_tlink(tlink);
return ERR_PTR(-EACCES);
}
if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
goto wait_for_construction;
}
tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
if (IS_ERR(tlink->tl_tcon)) {
cifs_put_tlink(tlink);
return ERR_PTR(-EACCES);
}
return tlink;
}
/*
* periodic workqueue job that scans tcon_tree for a superblock and closes
* out tcons.
*/
static void
cifs_prune_tlinks(struct work_struct *work)
{
struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
prune_tlinks.work);
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node = rb_first(root);
struct rb_node *tmp;
struct tcon_link *tlink;
/*
* Because we drop the spinlock in the loop in order to put the tlink
* it's not guarded against removal of links from the tree. The only
* places that remove entries from the tree are this function and
* umounts. Because this function is non-reentrant and is canceled
* before umount can proceed, this is safe.
*/
spin_lock(&cifs_sb->tlink_tree_lock);
node = rb_first(root);
while (node != NULL) {
tmp = node;
node = rb_next(tmp);
tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
atomic_read(&tlink->tl_count) != 0 ||
time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
continue;
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(tmp, root);
spin_unlock(&cifs_sb->tlink_tree_lock);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5728_0 |
crossvul-cpp_data_good_3457_0 | /*
* arch/arm/kernel/sys_oabi-compat.c
*
* Compatibility wrappers for syscalls that are used from
* old ABI user space binaries with an EABI kernel.
*
* Author: Nicolas Pitre
* Created: Oct 7, 2005
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* The legacy ABI and the new ARM EABI have different rules making some
* syscalls incompatible especially with structure arguments.
* Most notably, Eabi says 64-bit members should be 64-bit aligned instead of
* simply word aligned. EABI also pads structures to the size of the largest
* member it contains instead of the invariant 32-bit.
*
* The following syscalls are affected:
*
* sys_stat64:
* sys_lstat64:
* sys_fstat64:
* sys_fstatat64:
*
* struct stat64 has different sizes and some members are shifted
* Compatibility wrappers are needed for them and provided below.
*
* sys_fcntl64:
*
* struct flock64 has different sizes and some members are shifted
* A compatibility wrapper is needed and provided below.
*
* sys_statfs64:
* sys_fstatfs64:
*
* struct statfs64 has extra padding with EABI growing its size from
* 84 to 88. This struct is now __attribute__((packed,aligned(4)))
* with a small assembly wrapper to force the sz argument to 84 if it is 88
* to avoid copying the extra padding over user space unexpecting it.
*
* sys_newuname:
*
* struct new_utsname has no padding with EABI. No problem there.
*
* sys_epoll_ctl:
* sys_epoll_wait:
*
* struct epoll_event has its second member shifted also affecting the
* structure size. Compatibility wrappers are needed and provided below.
*
* sys_ipc:
* sys_semop:
* sys_semtimedop:
*
* struct sembuf loses its padding with EABI. Since arrays of them are
* used they have to be copyed to remove the padding. Compatibility wrappers
* provided below.
*
* sys_bind:
* sys_connect:
* sys_sendmsg:
* sys_sendto:
* sys_socketcall:
*
* struct sockaddr_un loses its padding with EABI. Since the size of the
* structure is used as a validation test in unix_mkname(), we need to
* change the length argument to 110 whenever it is 112. Compatibility
* wrappers provided below.
*/
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/eventpoll.h>
#include <linux/sem.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
struct oldabi_stat64 {
unsigned long long st_dev;
unsigned int __pad1;
unsigned long __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned long st_uid;
unsigned long st_gid;
unsigned long long st_rdev;
unsigned int __pad2;
long long st_size;
unsigned long st_blksize;
unsigned long long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long long st_ino;
} __attribute__ ((packed,aligned(4)));
static long cp_oldabi_stat64(struct kstat *stat,
struct oldabi_stat64 __user *statbuf)
{
struct oldabi_stat64 tmp;
tmp.st_dev = huge_encode_dev(stat->dev);
tmp.__pad1 = 0;
tmp.__st_ino = stat->ino;
tmp.st_mode = stat->mode;
tmp.st_nlink = stat->nlink;
tmp.st_uid = stat->uid;
tmp.st_gid = stat->gid;
tmp.st_rdev = huge_encode_dev(stat->rdev);
tmp.st_size = stat->size;
tmp.st_blocks = stat->blocks;
tmp.__pad2 = 0;
tmp.st_blksize = stat->blksize;
tmp.st_atime = stat->atime.tv_sec;
tmp.st_atime_nsec = stat->atime.tv_nsec;
tmp.st_mtime = stat->mtime.tv_sec;
tmp.st_mtime_nsec = stat->mtime.tv_nsec;
tmp.st_ctime = stat->ctime.tv_sec;
tmp.st_ctime_nsec = stat->ctime.tv_nsec;
tmp.st_ino = stat->ino;
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
asmlinkage long sys_oabi_stat64(const char __user * filename,
struct oldabi_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
return error;
}
asmlinkage long sys_oabi_lstat64(const char __user * filename,
struct oldabi_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
return error;
}
asmlinkage long sys_oabi_fstat64(unsigned long fd,
struct oldabi_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
return error;
}
asmlinkage long sys_oabi_fstatat64(int dfd,
const char __user *filename,
struct oldabi_stat64 __user *statbuf,
int flag)
{
struct kstat stat;
int error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_oldabi_stat64(&stat, statbuf);
}
struct oabi_flock64 {
short l_type;
short l_whence;
loff_t l_start;
loff_t l_len;
pid_t l_pid;
} __attribute__ ((packed,aligned(4)));
asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
struct oabi_flock64 user;
struct flock64 kernel;
mm_segment_t fs = USER_DS; /* initialized to kill a warning */
unsigned long local_arg = arg;
int ret;
switch (cmd) {
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
sizeof(user)))
return -EFAULT;
kernel.l_type = user.l_type;
kernel.l_whence = user.l_whence;
kernel.l_start = user.l_start;
kernel.l_len = user.l_len;
kernel.l_pid = user.l_pid;
local_arg = (unsigned long)&kernel;
fs = get_fs();
set_fs(KERNEL_DS);
}
ret = sys_fcntl64(fd, cmd, local_arg);
switch (cmd) {
case F_GETLK64:
if (!ret) {
user.l_type = kernel.l_type;
user.l_whence = kernel.l_whence;
user.l_start = kernel.l_start;
user.l_len = kernel.l_len;
user.l_pid = kernel.l_pid;
if (copy_to_user((struct oabi_flock64 __user *)arg,
&user, sizeof(user)))
ret = -EFAULT;
}
case F_SETLK64:
case F_SETLKW64:
set_fs(fs);
}
return ret;
}
struct oabi_epoll_event {
__u32 events;
__u64 data;
} __attribute__ ((packed,aligned(4)));
asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
struct oabi_epoll_event __user *event)
{
struct oabi_epoll_event user;
struct epoll_event kernel;
mm_segment_t fs;
long ret;
if (op == EPOLL_CTL_DEL)
return sys_epoll_ctl(epfd, op, fd, NULL);
if (copy_from_user(&user, event, sizeof(user)))
return -EFAULT;
kernel.events = user.events;
kernel.data = user.data;
fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_epoll_ctl(epfd, op, fd, &kernel);
set_fs(fs);
return ret;
}
asmlinkage long sys_oabi_epoll_wait(int epfd,
struct oabi_epoll_event __user *events,
int maxevents, int timeout)
{
struct epoll_event *kbuf;
mm_segment_t fs;
long ret, err, i;
if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
return -EINVAL;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
set_fs(fs);
err = 0;
for (i = 0; i < ret; i++) {
__put_user_error(kbuf[i].events, &events->events, err);
__put_user_error(kbuf[i].data, &events->data, err);
events++;
}
kfree(kbuf);
return err ? -EFAULT : ret;
}
struct oabi_sembuf {
unsigned short sem_num;
short sem_op;
short sem_flg;
unsigned short __pad;
};
asmlinkage long sys_oabi_semtimedop(int semid,
struct oabi_sembuf __user *tsops,
unsigned nsops,
const struct timespec __user *timeout)
{
struct sembuf *sops;
struct timespec local_timeout;
long err;
int i;
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
tsops++;
}
if (timeout) {
/* copy this as well before changing domain protection */
err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
timeout = &local_timeout;
}
if (err) {
err = -EFAULT;
} else {
mm_segment_t fs = get_fs();
set_fs(KERNEL_DS);
err = sys_semtimedop(semid, sops, nsops, timeout);
set_fs(fs);
}
kfree(sops);
return err;
}
asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
unsigned nsops)
{
return sys_oabi_semtimedop(semid, tsops, nsops, NULL);
}
asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
void __user *ptr, long fifth)
{
switch (call & 0xffff) {
case SEMOP:
return sys_oabi_semtimedop(first,
(struct oabi_sembuf __user *)ptr,
second, NULL);
case SEMTIMEDOP:
return sys_oabi_semtimedop(first,
(struct oabi_sembuf __user *)ptr,
second,
(const struct timespec __user *)fifth);
default:
return sys_ipc(call, first, second, third, ptr, fifth);
}
}
asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen)
{
sa_family_t sa_family;
if (addrlen == 112 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
addrlen = 110;
return sys_bind(fd, addr, addrlen);
}
asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen)
{
sa_family_t sa_family;
if (addrlen == 112 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
addrlen = 110;
return sys_connect(fd, addr, addrlen);
}
asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
size_t len, unsigned flags,
struct sockaddr __user *addr,
int addrlen)
{
sa_family_t sa_family;
if (addrlen == 112 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
addrlen = 110;
return sys_sendto(fd, buff, len, flags, addr, addrlen);
}
asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
struct sockaddr __user *addr;
int msg_namelen;
sa_family_t sa_family;
if (msg &&
get_user(msg_namelen, &msg->msg_namelen) == 0 &&
msg_namelen == 112 &&
get_user(addr, &msg->msg_name) == 0 &&
get_user(sa_family, &addr->sa_family) == 0 &&
sa_family == AF_UNIX)
{
/*
* HACK ALERT: there is a limit to how much backward bending
* we should do for what is actually a transitional
* compatibility layer. This already has known flaws with
* a few ioctls that we don't intend to fix. Therefore
* consider this blatent hack as another one... and take care
* to run for cover. In most cases it will "just work fine".
* If it doesn't, well, tough.
*/
put_user(110, &msg->msg_namelen);
}
return sys_sendmsg(fd, msg, flags);
}
asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
{
unsigned long r = -EFAULT, a[6];
switch (call) {
case SYS_BIND:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]);
break;
case SYS_CONNECT:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]);
break;
case SYS_SENDTO:
if (copy_from_user(a, args, 6 * sizeof(long)) == 0)
r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3],
(struct sockaddr __user *)a[4], a[5]);
break;
case SYS_SENDMSG:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]);
break;
default:
r = sys_socketcall(call, args);
}
return r;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3457_0 |
crossvul-cpp_data_good_1616_0 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
*
* This file implements the protocol specific parts of the USB service for a PD.
* -----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
#include "ozusbsvc.h"
#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
/*
* Context: softirq
*/
static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
{
int ret;
struct oz_elt *elt = (struct oz_elt *)ei->data;
struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
elt->type = OZ_ELT_APP_DATA;
ei->app_id = OZ_APPID_USB;
ei->length = elt->length + sizeof(struct oz_elt);
app_hdr->app_id = OZ_APPID_USB;
spin_lock_bh(&eb->lock);
if (isoc == 0) {
app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
if (usb_ctx->tx_seq_num == 0)
usb_ctx->tx_seq_num = 1;
}
ret = oz_queue_elt_info(eb, isoc, strid, ei);
if (ret)
oz_elt_info_free(eb, ei);
spin_unlock_bh(&eb->lock);
return ret;
}
/*
* Context: softirq
*/
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
u8 index, __le16 windex, int offset, int len)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_get_desc_req *body;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
oz_dbg(ON, " req_type = 0x%x\n", req_type);
oz_dbg(ON, " desc_type = 0x%x\n", desc_type);
oz_dbg(ON, " index = 0x%x\n", index);
oz_dbg(ON, " windex = 0x%x\n", windex);
oz_dbg(ON, " offset = 0x%x\n", offset);
oz_dbg(ON, " len = 0x%x\n", len);
if (len > 200)
len = 200;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_get_desc_req);
body = (struct oz_get_desc_req *)(elt+1);
body->type = OZ_GET_DESC_REQ;
body->req_id = req_id;
put_unaligned(cpu_to_le16(offset), &body->offset);
put_unaligned(cpu_to_le16(len), &body->size);
body->req_type = req_type;
body->desc_type = desc_type;
body->w_index = windex;
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_config_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_config_req);
body = (struct oz_set_config_req *)(elt+1);
body->type = OZ_SET_CONFIG_REQ;
body->req_id = req_id;
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_interface_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_interface_req);
body = (struct oz_set_interface_req *)(elt+1);
body->type = OZ_SET_INTERFACE_REQ;
body->req_id = req_id;
body->index = index;
body->alternative = alt;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
u8 recipient, u8 index, __le16 feature)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_feature_req);
body = (struct oz_feature_req *)(elt+1);
body->type = type;
body->req_id = req_id;
body->recipient = recipient;
body->index = index;
put_unaligned(feature, &body->feature);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_vendor_class_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
body = (struct oz_vendor_class_req *)(elt+1);
body->type = OZ_VENDOR_CLASS_REQ;
body->req_id = req_id;
body->req_type = req_type;
body->request = request;
put_unaligned(value, &body->value);
put_unaligned(index, &body->index);
if (data_len)
memcpy(body->data, data, data_len);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
const u8 *data, int data_len)
{
unsigned wvalue = le16_to_cpu(setup->wValue);
unsigned windex = le16_to_cpu(setup->wIndex);
unsigned wlength = le16_to_cpu(setup->wLength);
int rc = 0;
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
rc = oz_usb_get_desc_req(hpd, req_id,
setup->bRequestType, (u8)(wvalue>>8),
(u8)wvalue, setup->wIndex, 0, wlength);
break;
case USB_REQ_SET_CONFIGURATION:
rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
break;
case USB_REQ_SET_INTERFACE: {
u8 if_num = (u8)windex;
u8 alt = (u8)wvalue;
rc = oz_usb_set_interface_req(hpd, req_id,
if_num, alt);
}
break;
case USB_REQ_SET_FEATURE:
rc = oz_usb_set_clear_feature_req(hpd, req_id,
OZ_SET_FEATURE_REQ,
setup->bRequestType & 0xf, (u8)windex,
setup->wValue);
break;
case USB_REQ_CLEAR_FEATURE:
rc = oz_usb_set_clear_feature_req(hpd, req_id,
OZ_CLEAR_FEATURE_REQ,
setup->bRequestType & 0xf,
(u8)windex, setup->wValue);
break;
}
} else {
rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
setup->bRequest, setup->wValue, setup->wIndex,
data, data_len);
}
return rc;
}
/*
* Context: softirq
*/
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt_buf *eb;
int i;
int hdr_size;
u8 *data;
struct usb_iso_packet_descriptor *desc;
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
for (i = 0; i < urb->number_of_packets; i++) {
u8 *data;
desc = &urb->iso_frame_desc[i];
data = ((u8 *)urb->transfer_buffer)+desc->offset;
oz_send_isoc_unit(pd, ep_num, data, desc->length);
}
return 0;
}
hdr_size = sizeof(struct oz_isoc_fixed) - 1;
eb = &pd->elt_buff;
i = 0;
while (i < urb->number_of_packets) {
struct oz_elt_info *ei = oz_elt_info_alloc(eb);
struct oz_elt *elt;
struct oz_isoc_fixed *body;
int unit_count;
int unit_size;
int rem;
if (ei == NULL)
return -1;
rem = MAX_ISOC_FIXED_DATA;
elt = (struct oz_elt *)ei->data;
body = (struct oz_isoc_fixed *)(elt + 1);
body->type = OZ_USB_ENDPOINT_DATA;
body->endpoint = ep_num;
body->format = OZ_DATA_F_ISOC_FIXED;
unit_size = urb->iso_frame_desc[i].length;
body->unit_size = (u8)unit_size;
data = ((u8 *)(elt+1)) + hdr_size;
unit_count = 0;
while (i < urb->number_of_packets) {
desc = &urb->iso_frame_desc[i];
if ((unit_size == desc->length) &&
(desc->length <= rem)) {
memcpy(data, ((u8 *)urb->transfer_buffer) +
desc->offset, unit_size);
data += unit_size;
rem -= unit_size;
unit_count++;
desc->status = 0;
desc->actual_length = desc->length;
i++;
} else {
break;
}
}
elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
/* Store the number of units in body->frame_number for the
* moment. This field will be correctly determined before
* the element is sent. */
body->frame_number = (u8)unit_count;
oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
pd->mode & OZ_F_ISOC_ANYTIME);
}
return 0;
}
/*
* Context: softirq-serialized
*/
static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
switch (data_hdr->format) {
case OZ_DATA_F_MULTIPLE_FIXED: {
struct oz_multiple_fixed *body =
(struct oz_multiple_fixed *)data_hdr;
u8 *data = body->data;
int n;
if (!body->unit_size)
break;
n = (len - sizeof(struct oz_multiple_fixed)+1)
/ body->unit_size;
while (n--) {
oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
data, body->unit_size);
data += body->unit_size;
}
}
break;
case OZ_DATA_F_ISOC_FIXED: {
struct oz_isoc_fixed *body =
(struct oz_isoc_fixed *)data_hdr;
int data_len = len-sizeof(struct oz_isoc_fixed)+1;
int unit_size = body->unit_size;
u8 *data = body->data;
int count;
int i;
if (!unit_size)
break;
count = data_len/unit_size;
for (i = 0; i < count; i++) {
oz_hcd_data_ind(usb_ctx->hport,
body->endpoint, data, unit_size);
data += unit_size;
}
}
break;
}
}
/*
* This is called when the PD has received a USB element. The type of element
* is determined and is then passed to an appropriate handler function.
* Context: softirq-serialized
*/
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
{
struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
struct oz_usb_ctx *usb_ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (usb_ctx->stopped)
goto done;
/* If sequence number is non-zero then check it is not a duplicate.
* Zero sequence numbers are always accepted.
*/
if (usb_hdr->elt_seq_num != 0) {
if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
/* Reject duplicate element. */
goto done;
}
usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
switch (usb_hdr->type) {
case OZ_GET_DESC_RSP: {
struct oz_get_desc_rsp *body =
(struct oz_get_desc_rsp *)usb_hdr;
u16 offs, total_size;
u8 data_len;
if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
break;
data_len = elt->length -
(sizeof(struct oz_get_desc_rsp) - 1);
offs = le16_to_cpu(get_unaligned(&body->offset));
total_size =
le16_to_cpu(get_unaligned(&body->total_size));
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data,
data_len, offs, total_size);
}
break;
case OZ_SET_CONFIG_RSP: {
struct oz_set_config_rsp *body =
(struct oz_set_config_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
body->rcode, NULL, 0);
}
break;
case OZ_SET_INTERFACE_RSP: {
struct oz_set_interface_rsp *body =
(struct oz_set_interface_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport,
body->req_id, body->rcode, NULL, 0);
}
break;
case OZ_VENDOR_CLASS_RSP: {
struct oz_vendor_class_rsp *body =
(struct oz_vendor_class_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data, elt->length-
sizeof(struct oz_vendor_class_rsp)+1);
}
break;
case OZ_USB_ENDPOINT_DATA:
oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
break;
}
done:
oz_usb_put(usb_ctx);
}
/*
* Context: softirq, process
*/
void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
{
struct oz_usb_ctx *usb_ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (!usb_ctx->stopped) {
oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num);
oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
}
oz_usb_put(usb_ctx);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1616_0 |
crossvul-cpp_data_bad_3447_3 | /*
* sound/oss/opl3.c
*
* A low level driver for Yamaha YM3812 and OPL-3 -chips
*
*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
*
* Changes
* Thomas Sailer ioctl code reworked (vmalloc/vfree removed)
* Alan Cox modularisation, fixed sound_mem allocs.
* Christoph Hellwig Adapted to module_init/module_exit
* Arnaldo C. de Melo get rid of check_region, use request_region for
* OPL4, release it on exit, some cleanups.
*
* Status
* Believed to work. Badly needs rewriting a bit to support multiple
* OPL3 devices.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
/*
* Major improvements to the FM handling 30AUG92 by Rob Hooft,
* hooft@chem.ruu.nl
*/
#include "sound_config.h"
#include "opl3_hw.h"
#define MAX_VOICE 18
#define OFFS_4OP 11
struct voice_info
{
unsigned char keyon_byte;
long bender;
long bender_range;
unsigned long orig_freq;
unsigned long current_freq;
int volume;
int mode;
int panning; /* 0xffff means not set */
};
typedef struct opl_devinfo
{
int base;
int left_io, right_io;
int nr_voice;
int lv_map[MAX_VOICE];
struct voice_info voc[MAX_VOICE];
struct voice_alloc_info *v_alloc;
struct channel_info *chn_info;
struct sbi_instrument i_map[SBFM_MAXINSTR];
struct sbi_instrument *act_i[MAX_VOICE];
struct synth_info fm_info;
int busy;
int model;
unsigned char cmask;
int is_opl4;
} opl_devinfo;
static struct opl_devinfo *devc = NULL;
static int detected_model;
static int store_instr(int instr_no, struct sbi_instrument *instr);
static void freq_to_fnum(int freq, int *block, int *fnum);
static void opl3_command(int io_addr, unsigned int addr, unsigned int val);
static int opl3_kill_note(int dev, int voice, int note, int velocity);
static void enter_4op_mode(void)
{
int i;
static int v4op[MAX_VOICE] = {
0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17
};
devc->cmask = 0x3f; /* Connect all possible 4 OP voice operators */
opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, 0x3f);
for (i = 0; i < 3; i++)
pv_map[i].voice_mode = 4;
for (i = 3; i < 6; i++)
pv_map[i].voice_mode = 0;
for (i = 9; i < 12; i++)
pv_map[i].voice_mode = 4;
for (i = 12; i < 15; i++)
pv_map[i].voice_mode = 0;
for (i = 0; i < 12; i++)
devc->lv_map[i] = v4op[i];
devc->v_alloc->max_voice = devc->nr_voice = 12;
}
static int opl3_ioctl(int dev, unsigned int cmd, void __user * arg)
{
struct sbi_instrument ins;
switch (cmd) {
case SNDCTL_FM_LOAD_INSTR:
printk(KERN_WARNING "Warning: Obsolete ioctl(SNDCTL_FM_LOAD_INSTR) used. Fix the program.\n");
if (copy_from_user(&ins, arg, sizeof(ins)))
return -EFAULT;
if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) {
printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel);
return -EINVAL;
}
return store_instr(ins.channel, &ins);
case SNDCTL_SYNTH_INFO:
devc->fm_info.nr_voices = (devc->nr_voice == 12) ? 6 : devc->nr_voice;
if (copy_to_user(arg, &devc->fm_info, sizeof(devc->fm_info)))
return -EFAULT;
return 0;
case SNDCTL_SYNTH_MEMAVL:
return 0x7fffffff;
case SNDCTL_FM_4OP_ENABLE:
if (devc->model == 2)
enter_4op_mode();
return 0;
default:
return -EINVAL;
}
}
static int opl3_detect(int ioaddr)
{
/*
* This function returns 1 if the FM chip is present at the given I/O port
* The detection algorithm plays with the timer built in the FM chip and
* looks for a change in the status register.
*
* Note! The timers of the FM chip are not connected to AdLib (and compatible)
* boards.
*
* Note2! The chip is initialized if detected.
*/
unsigned char stat1, signature;
int i;
if (devc != NULL)
{
printk(KERN_ERR "opl3: Only one OPL3 supported.\n");
return 0;
}
devc = kzalloc(sizeof(*devc), GFP_KERNEL);
if (devc == NULL)
{
printk(KERN_ERR "opl3: Can't allocate memory for the device control "
"structure \n ");
return 0;
}
strcpy(devc->fm_info.name, "OPL2");
if (!request_region(ioaddr, 4, devc->fm_info.name)) {
printk(KERN_WARNING "opl3: I/O port 0x%x already in use\n", ioaddr);
goto cleanup_devc;
}
devc->base = ioaddr;
/* Reset timers 1 and 2 */
opl3_command(ioaddr, TIMER_CONTROL_REGISTER, TIMER1_MASK | TIMER2_MASK);
/* Reset the IRQ of the FM chip */
opl3_command(ioaddr, TIMER_CONTROL_REGISTER, IRQ_RESET);
signature = stat1 = inb(ioaddr); /* Status register */
if (signature != 0x00 && signature != 0x06 && signature != 0x02 &&
signature != 0x0f)
{
MDB(printk(KERN_INFO "OPL3 not detected %x\n", signature));
goto cleanup_region;
}
if (signature == 0x06) /* OPL2 */
{
detected_model = 2;
}
else if (signature == 0x00 || signature == 0x0f) /* OPL3 or OPL4 */
{
unsigned char tmp;
detected_model = 3;
/*
* Detect availability of OPL4 (_experimental_). Works probably
* only after a cold boot. In addition the OPL4 port
* of the chip may not be connected to the PC bus at all.
*/
opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0x00);
opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, OPL3_ENABLE | OPL4_ENABLE);
if ((tmp = inb(ioaddr)) == 0x02) /* Have a OPL4 */
{
detected_model = 4;
}
if (request_region(ioaddr - 8, 2, "OPL4")) /* OPL4 port was free */
{
int tmp;
outb((0x02), ioaddr - 8); /* Select OPL4 ID register */
udelay(10);
tmp = inb(ioaddr - 7); /* Read it */
udelay(10);
if (tmp == 0x20) /* OPL4 should return 0x20 here */
{
detected_model = 4;
outb((0xF8), ioaddr - 8); /* Select OPL4 FM mixer control */
udelay(10);
outb((0x1B), ioaddr - 7); /* Write value */
udelay(10);
}
else
{ /* release OPL4 port */
release_region(ioaddr - 8, 2);
detected_model = 3;
}
}
opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0);
}
for (i = 0; i < 9; i++)
opl3_command(ioaddr, KEYON_BLOCK + i, 0); /*
* Note off
*/
opl3_command(ioaddr, TEST_REGISTER, ENABLE_WAVE_SELECT);
opl3_command(ioaddr, PERCOSSION_REGISTER, 0x00); /*
* Melodic mode.
*/
return 1;
cleanup_region:
release_region(ioaddr, 4);
cleanup_devc:
kfree(devc);
devc = NULL;
return 0;
}
static int opl3_kill_note (int devno, int voice, int note, int velocity)
{
struct physical_voice_info *map;
if (voice < 0 || voice >= devc->nr_voice)
return 0;
devc->v_alloc->map[voice] = 0;
map = &pv_map[devc->lv_map[voice]];
DEB(printk("Kill note %d\n", voice));
if (map->voice_mode == 0)
return 0;
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, devc->voc[voice].keyon_byte & ~0x20);
devc->voc[voice].keyon_byte = 0;
devc->voc[voice].bender = 0;
devc->voc[voice].volume = 64;
devc->voc[voice].panning = 0xffff; /* Not set */
devc->voc[voice].bender_range = 200;
devc->voc[voice].orig_freq = 0;
devc->voc[voice].current_freq = 0;
devc->voc[voice].mode = 0;
return 0;
}
#define HIHAT 0
#define CYMBAL 1
#define TOMTOM 2
#define SNARE 3
#define BDRUM 4
#define UNDEFINED TOMTOM
#define DEFAULT TOMTOM
static int store_instr(int instr_no, struct sbi_instrument *instr)
{
if (instr->key != FM_PATCH && (instr->key != OPL3_PATCH || devc->model != 2))
printk(KERN_WARNING "FM warning: Invalid patch format field (key) 0x%x\n", instr->key);
memcpy((char *) &(devc->i_map[instr_no]), (char *) instr, sizeof(*instr));
return 0;
}
static int opl3_set_instr (int dev, int voice, int instr_no)
{
if (voice < 0 || voice >= devc->nr_voice)
return 0;
if (instr_no < 0 || instr_no >= SBFM_MAXINSTR)
instr_no = 0; /* Acoustic piano (usually) */
devc->act_i[voice] = &devc->i_map[instr_no];
return 0;
}
/*
* The next table looks magical, but it certainly is not. Its values have
* been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception
* for i=0. This log-table converts a linear volume-scaling (0..127) to a
* logarithmic scaling as present in the FM-synthesizer chips. so : Volume
* 64 = 0 db = relative volume 0 and: Volume 32 = -6 db = relative
* volume -8 it was implemented as a table because it is only 128 bytes and
* it saves a lot of log() calculations. (RH)
*/
static char fm_volume_table[128] =
{
-64, -48, -40, -35, -32, -29, -27, -26,
-24, -23, -21, -20, -19, -18, -18, -17,
-16, -15, -15, -14, -13, -13, -12, -12,
-11, -11, -10, -10, -10, -9, -9, -8,
-8, -8, -7, -7, -7, -6, -6, -6,
-5, -5, -5, -5, -4, -4, -4, -4,
-3, -3, -3, -3, -2, -2, -2, -2,
-2, -1, -1, -1, -1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 4,
4, 4, 4, 4, 4, 4, 4, 5,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6,
6, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 8, 8, 8, 8, 8
};
static void calc_vol(unsigned char *regbyte, int volume, int main_vol)
{
int level = (~*regbyte & 0x3f);
if (main_vol > 127)
main_vol = 127;
volume = (volume * main_vol) / 127;
if (level)
level += fm_volume_table[volume];
if (level > 0x3f)
level = 0x3f;
if (level < 0)
level = 0;
*regbyte = (*regbyte & 0xc0) | (~level & 0x3f);
}
static void set_voice_volume(int voice, int volume, int main_vol)
{
unsigned char vol1, vol2, vol3, vol4;
struct sbi_instrument *instr;
struct physical_voice_info *map;
if (voice < 0 || voice >= devc->nr_voice)
return;
map = &pv_map[devc->lv_map[voice]];
instr = devc->act_i[voice];
if (!instr)
instr = &devc->i_map[0];
if (instr->channel < 0)
return;
if (devc->voc[voice].mode == 0)
return;
if (devc->voc[voice].mode == 2)
{
vol1 = instr->operators[2];
vol2 = instr->operators[3];
if ((instr->operators[10] & 0x01))
{
calc_vol(&vol1, volume, main_vol);
calc_vol(&vol2, volume, main_vol);
}
else
{
calc_vol(&vol2, volume, main_vol);
}
opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], vol1);
opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], vol2);
}
else
{ /*
* 4 OP voice
*/
int connection;
vol1 = instr->operators[2];
vol2 = instr->operators[3];
vol3 = instr->operators[OFFS_4OP + 2];
vol4 = instr->operators[OFFS_4OP + 3];
/*
* The connection method for 4 OP devc->voc is defined by the rightmost
* bits at the offsets 10 and 10+OFFS_4OP
*/
connection = ((instr->operators[10] & 0x01) << 1) | (instr->operators[10 + OFFS_4OP] & 0x01);
switch (connection)
{
case 0:
calc_vol(&vol4, volume, main_vol);
break;
case 1:
calc_vol(&vol2, volume, main_vol);
calc_vol(&vol4, volume, main_vol);
break;
case 2:
calc_vol(&vol1, volume, main_vol);
calc_vol(&vol4, volume, main_vol);
break;
case 3:
calc_vol(&vol1, volume, main_vol);
calc_vol(&vol3, volume, main_vol);
calc_vol(&vol4, volume, main_vol);
break;
default:
;
}
opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], vol1);
opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], vol2);
opl3_command(map->ioaddr, KSL_LEVEL + map->op[2], vol3);
opl3_command(map->ioaddr, KSL_LEVEL + map->op[3], vol4);
}
}
static int opl3_start_note (int dev, int voice, int note, int volume)
{
unsigned char data, fpc;
int block, fnum, freq, voice_mode, pan;
struct sbi_instrument *instr;
struct physical_voice_info *map;
if (voice < 0 || voice >= devc->nr_voice)
return 0;
map = &pv_map[devc->lv_map[voice]];
pan = devc->voc[voice].panning;
if (map->voice_mode == 0)
return 0;
if (note == 255) /*
* Just change the volume
*/
{
set_voice_volume(voice, volume, devc->voc[voice].volume);
return 0;
}
/*
* Kill previous note before playing
*/
opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], 0xff); /*
* Carrier
* volume to
* min
*/
opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], 0xff); /*
* Modulator
* volume to
*/
if (map->voice_mode == 4)
{
opl3_command(map->ioaddr, KSL_LEVEL + map->op[2], 0xff);
opl3_command(map->ioaddr, KSL_LEVEL + map->op[3], 0xff);
}
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, 0x00); /*
* Note
* off
*/
instr = devc->act_i[voice];
if (!instr)
instr = &devc->i_map[0];
if (instr->channel < 0)
{
printk(KERN_WARNING "opl3: Initializing voice %d with undefined instrument\n", voice);
return 0;
}
if (map->voice_mode == 2 && instr->key == OPL3_PATCH)
return 0; /*
* Cannot play
*/
voice_mode = map->voice_mode;
if (voice_mode == 4)
{
int voice_shift;
voice_shift = (map->ioaddr == devc->left_io) ? 0 : 3;
voice_shift += map->voice_num;
if (instr->key != OPL3_PATCH) /*
* Just 2 OP patch
*/
{
voice_mode = 2;
devc->cmask &= ~(1 << voice_shift);
}
else
{
devc->cmask |= (1 << voice_shift);
}
opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, devc->cmask);
}
/*
* Set Sound Characteristics
*/
opl3_command(map->ioaddr, AM_VIB + map->op[0], instr->operators[0]);
opl3_command(map->ioaddr, AM_VIB + map->op[1], instr->operators[1]);
/*
* Set Attack/Decay
*/
opl3_command(map->ioaddr, ATTACK_DECAY + map->op[0], instr->operators[4]);
opl3_command(map->ioaddr, ATTACK_DECAY + map->op[1], instr->operators[5]);
/*
* Set Sustain/Release
*/
opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[0], instr->operators[6]);
opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[1], instr->operators[7]);
/*
* Set Wave Select
*/
opl3_command(map->ioaddr, WAVE_SELECT + map->op[0], instr->operators[8]);
opl3_command(map->ioaddr, WAVE_SELECT + map->op[1], instr->operators[9]);
/*
* Set Feedback/Connection
*/
fpc = instr->operators[10];
if (pan != 0xffff)
{
fpc &= ~STEREO_BITS;
if (pan < -64)
fpc |= VOICE_TO_LEFT;
else
if (pan > 64)
fpc |= VOICE_TO_RIGHT;
else
fpc |= (VOICE_TO_LEFT | VOICE_TO_RIGHT);
}
if (!(fpc & 0x30))
fpc |= 0x30; /*
* Ensure that at least one chn is enabled
*/
opl3_command(map->ioaddr, FEEDBACK_CONNECTION + map->voice_num, fpc);
/*
* If the voice is a 4 OP one, initialize the operators 3 and 4 also
*/
if (voice_mode == 4)
{
/*
* Set Sound Characteristics
*/
opl3_command(map->ioaddr, AM_VIB + map->op[2], instr->operators[OFFS_4OP + 0]);
opl3_command(map->ioaddr, AM_VIB + map->op[3], instr->operators[OFFS_4OP + 1]);
/*
* Set Attack/Decay
*/
opl3_command(map->ioaddr, ATTACK_DECAY + map->op[2], instr->operators[OFFS_4OP + 4]);
opl3_command(map->ioaddr, ATTACK_DECAY + map->op[3], instr->operators[OFFS_4OP + 5]);
/*
* Set Sustain/Release
*/
opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[2], instr->operators[OFFS_4OP + 6]);
opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[3], instr->operators[OFFS_4OP + 7]);
/*
* Set Wave Select
*/
opl3_command(map->ioaddr, WAVE_SELECT + map->op[2], instr->operators[OFFS_4OP + 8]);
opl3_command(map->ioaddr, WAVE_SELECT + map->op[3], instr->operators[OFFS_4OP + 9]);
/*
* Set Feedback/Connection
*/
fpc = instr->operators[OFFS_4OP + 10];
if (!(fpc & 0x30))
fpc |= 0x30; /*
* Ensure that at least one chn is enabled
*/
opl3_command(map->ioaddr, FEEDBACK_CONNECTION + map->voice_num + 3, fpc);
}
devc->voc[voice].mode = voice_mode;
set_voice_volume(voice, volume, devc->voc[voice].volume);
freq = devc->voc[voice].orig_freq = note_to_freq(note) / 1000;
/*
* Since the pitch bender may have been set before playing the note, we
* have to calculate the bending now.
*/
freq = compute_finetune(devc->voc[voice].orig_freq, devc->voc[voice].bender, devc->voc[voice].bender_range, 0);
devc->voc[voice].current_freq = freq;
freq_to_fnum(freq, &block, &fnum);
/*
* Play note
*/
data = fnum & 0xff; /*
* Least significant bits of fnumber
*/
opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data);
data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3);
devc->voc[voice].keyon_byte = data;
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data);
if (voice_mode == 4)
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num + 3, data);
return 0;
}
static void freq_to_fnum (int freq, int *block, int *fnum)
{
int f, octave;
/*
* Converts the note frequency to block and fnum values for the FM chip
*/
/*
* First try to compute the block -value (octave) where the note belongs
*/
f = freq;
octave = 5;
if (f == 0)
octave = 0;
else if (f < 261)
{
while (f < 261)
{
octave--;
f <<= 1;
}
}
else if (f > 493)
{
while (f > 493)
{
octave++;
f >>= 1;
}
}
if (octave > 7)
octave = 7;
*fnum = freq * (1 << (20 - octave)) / 49716;
*block = octave;
}
static void opl3_command (int io_addr, unsigned int addr, unsigned int val)
{
int i;
/*
* The original 2-OP synth requires a quite long delay after writing to a
* register. The OPL-3 survives with just two INBs
*/
outb(((unsigned char) (addr & 0xff)), io_addr);
if (devc->model != 2)
udelay(10);
else
for (i = 0; i < 2; i++)
inb(io_addr);
outb(((unsigned char) (val & 0xff)), io_addr + 1);
if (devc->model != 2)
udelay(30);
else
for (i = 0; i < 2; i++)
inb(io_addr);
}
static void opl3_reset(int devno)
{
int i;
for (i = 0; i < 18; i++)
devc->lv_map[i] = i;
for (i = 0; i < devc->nr_voice; i++)
{
opl3_command(pv_map[devc->lv_map[i]].ioaddr,
KSL_LEVEL + pv_map[devc->lv_map[i]].op[0], 0xff);
opl3_command(pv_map[devc->lv_map[i]].ioaddr,
KSL_LEVEL + pv_map[devc->lv_map[i]].op[1], 0xff);
if (pv_map[devc->lv_map[i]].voice_mode == 4)
{
opl3_command(pv_map[devc->lv_map[i]].ioaddr,
KSL_LEVEL + pv_map[devc->lv_map[i]].op[2], 0xff);
opl3_command(pv_map[devc->lv_map[i]].ioaddr,
KSL_LEVEL + pv_map[devc->lv_map[i]].op[3], 0xff);
}
opl3_kill_note(devno, i, 0, 64);
}
if (devc->model == 2)
{
devc->v_alloc->max_voice = devc->nr_voice = 18;
for (i = 0; i < 18; i++)
pv_map[i].voice_mode = 2;
}
}
static int opl3_open(int dev, int mode)
{
int i;
if (devc->busy)
return -EBUSY;
devc->busy = 1;
devc->v_alloc->max_voice = devc->nr_voice = (devc->model == 2) ? 18 : 9;
devc->v_alloc->timestamp = 0;
for (i = 0; i < 18; i++)
{
devc->v_alloc->map[i] = 0;
devc->v_alloc->alloc_times[i] = 0;
}
devc->cmask = 0x00; /*
* Just 2 OP mode
*/
if (devc->model == 2)
opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, devc->cmask);
return 0;
}
static void opl3_close(int dev)
{
devc->busy = 0;
devc->v_alloc->max_voice = devc->nr_voice = (devc->model == 2) ? 18 : 9;
devc->fm_info.nr_drums = 0;
devc->fm_info.perc_mode = 0;
opl3_reset(dev);
}
static void opl3_hw_control(int dev, unsigned char *event)
{
}
static int opl3_load_patch(int dev, int format, const char __user *addr,
int offs, int count, int pmgr_flag)
{
struct sbi_instrument ins;
if (count <sizeof(ins))
{
printk(KERN_WARNING "FM Error: Patch record too short\n");
return -EINVAL;
}
/*
* What the fuck is going on here? We leave junk in the beginning
* of ins and then check the field pretty close to that beginning?
*/
if(copy_from_user(&((char *) &ins)[offs], addr + offs, sizeof(ins) - offs))
return -EFAULT;
if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR)
{
printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel);
return -EINVAL;
}
ins.key = format;
return store_instr(ins.channel, &ins);
}
static void opl3_panning(int dev, int voice, int value)
{
devc->voc[voice].panning = value;
}
static void opl3_volume_method(int dev, int mode)
{
}
#define SET_VIBRATO(cell) { \
tmp = instr->operators[(cell-1)+(((cell-1)/2)*OFFS_4OP)]; \
if (pressure > 110) \
tmp |= 0x40; /* Vibrato on */ \
opl3_command (map->ioaddr, AM_VIB + map->op[cell-1], tmp);}
static void opl3_aftertouch(int dev, int voice, int pressure)
{
int tmp;
struct sbi_instrument *instr;
struct physical_voice_info *map;
if (voice < 0 || voice >= devc->nr_voice)
return;
map = &pv_map[devc->lv_map[voice]];
DEB(printk("Aftertouch %d\n", voice));
if (map->voice_mode == 0)
return;
/*
* Adjust the amount of vibrato depending the pressure
*/
instr = devc->act_i[voice];
if (!instr)
instr = &devc->i_map[0];
if (devc->voc[voice].mode == 4)
{
int connection = ((instr->operators[10] & 0x01) << 1) | (instr->operators[10 + OFFS_4OP] & 0x01);
switch (connection)
{
case 0:
SET_VIBRATO(4);
break;
case 1:
SET_VIBRATO(2);
SET_VIBRATO(4);
break;
case 2:
SET_VIBRATO(1);
SET_VIBRATO(4);
break;
case 3:
SET_VIBRATO(1);
SET_VIBRATO(3);
SET_VIBRATO(4);
break;
}
/*
* Not implemented yet
*/
}
else
{
SET_VIBRATO(1);
if ((instr->operators[10] & 0x01)) /*
* Additive synthesis
*/
SET_VIBRATO(2);
}
}
#undef SET_VIBRATO
static void bend_pitch(int dev, int voice, int value)
{
unsigned char data;
int block, fnum, freq;
struct physical_voice_info *map;
map = &pv_map[devc->lv_map[voice]];
if (map->voice_mode == 0)
return;
devc->voc[voice].bender = value;
if (!value)
return;
if (!(devc->voc[voice].keyon_byte & 0x20))
return; /*
* Not keyed on
*/
freq = compute_finetune(devc->voc[voice].orig_freq, devc->voc[voice].bender, devc->voc[voice].bender_range, 0);
devc->voc[voice].current_freq = freq;
freq_to_fnum(freq, &block, &fnum);
data = fnum & 0xff; /*
* Least significant bits of fnumber
*/
opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data);
data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3);
devc->voc[voice].keyon_byte = data;
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data);
}
static void opl3_controller (int dev, int voice, int ctrl_num, int value)
{
if (voice < 0 || voice >= devc->nr_voice)
return;
switch (ctrl_num)
{
case CTRL_PITCH_BENDER:
bend_pitch(dev, voice, value);
break;
case CTRL_PITCH_BENDER_RANGE:
devc->voc[voice].bender_range = value;
break;
case CTL_MAIN_VOLUME:
devc->voc[voice].volume = value / 128;
break;
case CTL_PAN:
devc->voc[voice].panning = (value * 2) - 128;
break;
}
}
static void opl3_bender(int dev, int voice, int value)
{
if (voice < 0 || voice >= devc->nr_voice)
return;
bend_pitch(dev, voice, value - 8192);
}
static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info *alloc)
{
int i, p, best, first, avail, best_time = 0x7fffffff;
struct sbi_instrument *instr;
int is4op;
int instr_no;
if (chn < 0 || chn > 15)
instr_no = 0;
else
instr_no = devc->chn_info[chn].pgm_num;
instr = &devc->i_map[instr_no];
if (instr->channel < 0 || /* Instrument not loaded */
devc->nr_voice != 12) /* Not in 4 OP mode */
is4op = 0;
else if (devc->nr_voice == 12) /* 4 OP mode */
is4op = (instr->key == OPL3_PATCH);
else
is4op = 0;
if (is4op)
{
first = p = 0;
avail = 6;
}
else
{
if (devc->nr_voice == 12) /* 4 OP mode. Use the '2 OP only' operators first */
first = p = 6;
else
first = p = 0;
avail = devc->nr_voice;
}
/*
* Now try to find a free voice
*/
best = first;
for (i = 0; i < avail; i++)
{
if (alloc->map[p] == 0)
{
return p;
}
if (alloc->alloc_times[p] < best_time) /* Find oldest playing note */
{
best_time = alloc->alloc_times[p];
best = p;
}
p = (p + 1) % avail;
}
/*
* Insert some kind of priority mechanism here.
*/
if (best < 0)
best = 0;
if (best > devc->nr_voice)
best -= devc->nr_voice;
return best; /* All devc->voc in use. Select the first one. */
}
static void opl3_setup_voice(int dev, int voice, int chn)
{
struct channel_info *info =
&synth_devs[dev]->chn_info[chn];
opl3_set_instr(dev, voice, info->pgm_num);
devc->voc[voice].bender = 0;
devc->voc[voice].bender_range = info->bender_range;
devc->voc[voice].volume = info->controllers[CTL_MAIN_VOLUME];
devc->voc[voice].panning = (info->controllers[CTL_PAN] * 2) - 128;
}
static struct synth_operations opl3_operations =
{
.owner = THIS_MODULE,
.id = "OPL",
.info = NULL,
.midi_dev = 0,
.synth_type = SYNTH_TYPE_FM,
.synth_subtype = FM_TYPE_ADLIB,
.open = opl3_open,
.close = opl3_close,
.ioctl = opl3_ioctl,
.kill_note = opl3_kill_note,
.start_note = opl3_start_note,
.set_instr = opl3_set_instr,
.reset = opl3_reset,
.hw_control = opl3_hw_control,
.load_patch = opl3_load_patch,
.aftertouch = opl3_aftertouch,
.controller = opl3_controller,
.panning = opl3_panning,
.volume_method = opl3_volume_method,
.bender = opl3_bender,
.alloc_voice = opl3_alloc_voice,
.setup_voice = opl3_setup_voice
};
static int opl3_init(int ioaddr, struct module *owner)
{
int i;
int me;
if (devc == NULL)
{
printk(KERN_ERR "opl3: Device control structure not initialized.\n");
return -1;
}
if ((me = sound_alloc_synthdev()) == -1)
{
printk(KERN_WARNING "opl3: Too many synthesizers\n");
return -1;
}
devc->nr_voice = 9;
devc->fm_info.device = 0;
devc->fm_info.synth_type = SYNTH_TYPE_FM;
devc->fm_info.synth_subtype = FM_TYPE_ADLIB;
devc->fm_info.perc_mode = 0;
devc->fm_info.nr_voices = 9;
devc->fm_info.nr_drums = 0;
devc->fm_info.instr_bank_size = SBFM_MAXINSTR;
devc->fm_info.capabilities = 0;
devc->left_io = ioaddr;
devc->right_io = ioaddr + 2;
if (detected_model <= 2)
devc->model = 1;
else
{
devc->model = 2;
if (detected_model == 4)
devc->is_opl4 = 1;
}
opl3_operations.info = &devc->fm_info;
synth_devs[me] = &opl3_operations;
if (owner)
synth_devs[me]->owner = owner;
sequencer_init();
devc->v_alloc = &opl3_operations.alloc;
devc->chn_info = &opl3_operations.chn_info[0];
if (devc->model == 2)
{
if (devc->is_opl4)
strcpy(devc->fm_info.name, "Yamaha OPL4/OPL3 FM");
else
strcpy(devc->fm_info.name, "Yamaha OPL3");
devc->v_alloc->max_voice = devc->nr_voice = 18;
devc->fm_info.nr_drums = 0;
devc->fm_info.synth_subtype = FM_TYPE_OPL3;
devc->fm_info.capabilities |= SYNTH_CAP_OPL3;
for (i = 0; i < 18; i++)
{
if (pv_map[i].ioaddr == USE_LEFT)
pv_map[i].ioaddr = devc->left_io;
else
pv_map[i].ioaddr = devc->right_io;
}
opl3_command(devc->right_io, OPL3_MODE_REGISTER, OPL3_ENABLE);
opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, 0x00);
}
else
{
strcpy(devc->fm_info.name, "Yamaha OPL2");
devc->v_alloc->max_voice = devc->nr_voice = 9;
devc->fm_info.nr_drums = 0;
for (i = 0; i < 18; i++)
pv_map[i].ioaddr = devc->left_io;
};
conf_printf2(devc->fm_info.name, ioaddr, 0, -1, -1);
for (i = 0; i < SBFM_MAXINSTR; i++)
devc->i_map[i].channel = -1;
return me;
}
static int me;
static int io = -1;
module_param(io, int, 0);
static int __init init_opl3 (void)
{
printk(KERN_INFO "YM3812 and OPL-3 driver Copyright (C) by Hannu Savolainen, Rob Hooft 1993-1996\n");
if (io != -1) /* User loading pure OPL3 module */
{
if (!opl3_detect(io))
{
return -ENODEV;
}
me = opl3_init(io, THIS_MODULE);
}
return 0;
}
static void __exit cleanup_opl3(void)
{
if (devc && io != -1)
{
if (devc->base) {
release_region(devc->base,4);
if (devc->is_opl4)
release_region(devc->base - 8, 2);
}
kfree(devc);
devc = NULL;
sound_unload_synthdev(me);
}
}
module_init(init_opl3);
module_exit(cleanup_opl3);
#ifndef MODULE
static int __init setup_opl3(char *str)
{
/* io */
int ints[2];
str = get_options(str, ARRAY_SIZE(ints), ints);
io = ints[1];
return 1;
}
__setup("opl3=", setup_opl3);
#endif
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3447_3 |
crossvul-cpp_data_good_3498_0 | /*
* Support for n32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
#define ELF_CLASS ELFCLASS32
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB;
#else /* __MIPSEL__ */
#define ELF_DATA ELFDATA2LSB;
#endif
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
((__h->e_flags & EF_MIPS_ABI) != 0)) \
__res = 0; \
\
__res; \
})
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
#define jiffies_to_timeval jiffies_to_compat_timeval
static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
u32 rem;
value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
#define ELF_CORE_EFLAGS EF_MIPS_ABI2
MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
#include "../../../fs/binfmt_elf.c"
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_0 |
crossvul-cpp_data_bad_4800_1 | /* $Id$
*
* tiff2pdf - converts a TIFF image to a PDF document
*
* Copyright (c) 2003 Ross Finlayson
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee, provided
* that (i) the above copyright notices and this permission notice appear in
* all copies of the software and related documentation, and (ii) the name of
* Ross Finlayson may not be used in any advertising or
* publicity relating to the software without the specific, prior written
* permission of Ross Finlayson.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL ROSS FINLAYSON BE LIABLE FOR
* ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
* OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
* LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include "tif_config.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <errno.h>
#include <limits.h>
#if HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
# include <fcntl.h>
#endif
#ifdef HAVE_IO_H
# include <io.h>
#endif
#ifdef NEED_LIBPORT
# include "libport.h"
#endif
#include "tiffiop.h"
#include "tiffio.h"
#ifndef HAVE_GETOPT
extern int getopt(int, char**, char*);
#endif
#ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
#endif
#ifndef EXIT_FAILURE
# define EXIT_FAILURE 1
#endif
#define TIFF2PDF_MODULE "tiff2pdf"
#define PS_UNIT_SIZE 72.0F
/* This type is of PDF color spaces. */
typedef enum {
T2P_CS_BILEVEL = 0x01, /* Bilevel, black and white */
T2P_CS_GRAY = 0x02, /* Single channel */
T2P_CS_RGB = 0x04, /* Three channel tristimulus RGB */
T2P_CS_CMYK = 0x08, /* Four channel CMYK print inkset */
T2P_CS_LAB = 0x10, /* Three channel L*a*b* color space */
T2P_CS_PALETTE = 0x1000,/* One of the above with a color map */
T2P_CS_CALGRAY = 0x20, /* Calibrated single channel */
T2P_CS_CALRGB = 0x40, /* Calibrated three channel tristimulus RGB */
T2P_CS_ICCBASED = 0x80 /* ICC profile color specification */
} t2p_cs_t;
/* This type is of PDF compression types. */
typedef enum{
T2P_COMPRESS_NONE=0x00
#ifdef CCITT_SUPPORT
, T2P_COMPRESS_G4=0x01
#endif
#if defined(JPEG_SUPPORT) || defined(OJPEG_SUPPORT)
, T2P_COMPRESS_JPEG=0x02
#endif
#ifdef ZIP_SUPPORT
, T2P_COMPRESS_ZIP=0x04
#endif
} t2p_compress_t;
/* This type is whether TIFF image data can be used in PDF without transcoding. */
typedef enum{
T2P_TRANSCODE_RAW=0x01, /* The raw data from the input can be used without recompressing */
T2P_TRANSCODE_ENCODE=0x02 /* The data from the input is perhaps unencoded and reencoded */
} t2p_transcode_t;
/* This type is of information about the data samples of the input image. */
typedef enum{
T2P_SAMPLE_NOTHING=0x0000, /* The unencoded samples are normal for the output colorspace */
T2P_SAMPLE_ABGR_TO_RGB=0x0001, /* The unencoded samples are the result of ReadRGBAImage */
T2P_SAMPLE_RGBA_TO_RGB=0x0002, /* The unencoded samples are contiguous RGBA */
T2P_SAMPLE_RGBAA_TO_RGB=0x0004, /* The unencoded samples are RGBA with premultiplied alpha */
T2P_SAMPLE_YCBCR_TO_RGB=0x0008,
T2P_SAMPLE_YCBCR_TO_LAB=0x0010,
T2P_SAMPLE_REALIZE_PALETTE=0x0020, /* The unencoded samples are indexes into the color map */
T2P_SAMPLE_SIGNED_TO_UNSIGNED=0x0040, /* The unencoded samples are signed instead of unsignd */
T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED=0x0040, /* The L*a*b* samples have a* and b* signed */
T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG=0x0100 /* The unencoded samples are separate instead of contiguous */
} t2p_sample_t;
/* This type is of error status of the T2P struct. */
typedef enum{
T2P_ERR_OK = 0, /* This is the value of t2p->t2p_error when there is no error */
T2P_ERR_ERROR = 1 /* This is the value of t2p->t2p_error when there was an error */
} t2p_err_t;
/* This struct defines a logical page of a TIFF. */
typedef struct {
tdir_t page_directory;
uint32 page_number;
ttile_t page_tilecount;
uint32 page_extra;
} T2P_PAGE;
/* This struct defines a PDF rectangle's coordinates. */
typedef struct {
float x1;
float y1;
float x2;
float y2;
float mat[9];
} T2P_BOX;
/* This struct defines a tile of a PDF. */
typedef struct {
T2P_BOX tile_box;
} T2P_TILE;
/* This struct defines information about the tiles on a PDF page. */
typedef struct {
ttile_t tiles_tilecount;
uint32 tiles_tilewidth;
uint32 tiles_tilelength;
uint32 tiles_tilecountx;
uint32 tiles_tilecounty;
uint32 tiles_edgetilewidth;
uint32 tiles_edgetilelength;
T2P_TILE* tiles_tiles;
} T2P_TILES;
/* This struct is the context of a function to generate PDF from a TIFF. */
typedef struct {
t2p_err_t t2p_error;
T2P_PAGE* tiff_pages;
T2P_TILES* tiff_tiles;
tdir_t tiff_pagecount;
uint16 tiff_compression;
uint16 tiff_photometric;
uint16 tiff_fillorder;
uint16 tiff_bitspersample;
uint16 tiff_samplesperpixel;
uint16 tiff_planar;
uint32 tiff_width;
uint32 tiff_length;
float tiff_xres;
float tiff_yres;
uint16 tiff_orientation;
toff_t tiff_dataoffset;
tsize_t tiff_datasize;
uint16 tiff_resunit;
uint16 pdf_centimeters;
uint16 pdf_overrideres;
uint16 pdf_overridepagesize;
float pdf_defaultxres;
float pdf_defaultyres;
float pdf_xres;
float pdf_yres;
float pdf_defaultpagewidth;
float pdf_defaultpagelength;
float pdf_pagewidth;
float pdf_pagelength;
float pdf_imagewidth;
float pdf_imagelength;
int pdf_image_fillpage; /* 0 (default: no scaling, 1:scale imagesize to pagesize */
T2P_BOX pdf_mediabox;
T2P_BOX pdf_imagebox;
uint16 pdf_majorversion;
uint16 pdf_minorversion;
uint32 pdf_catalog;
uint32 pdf_pages;
uint32 pdf_info;
uint32 pdf_palettecs;
uint16 pdf_fitwindow;
uint32 pdf_startxref;
#define TIFF2PDF_FILEID_SIZE 33
char pdf_fileid[TIFF2PDF_FILEID_SIZE];
#define TIFF2PDF_DATETIME_SIZE 17
char pdf_datetime[TIFF2PDF_DATETIME_SIZE];
#define TIFF2PDF_CREATOR_SIZE 512
char pdf_creator[TIFF2PDF_CREATOR_SIZE];
#define TIFF2PDF_AUTHOR_SIZE 512
char pdf_author[TIFF2PDF_AUTHOR_SIZE];
#define TIFF2PDF_TITLE_SIZE 512
char pdf_title[TIFF2PDF_TITLE_SIZE];
#define TIFF2PDF_SUBJECT_SIZE 512
char pdf_subject[TIFF2PDF_SUBJECT_SIZE];
#define TIFF2PDF_KEYWORDS_SIZE 512
char pdf_keywords[TIFF2PDF_KEYWORDS_SIZE];
t2p_cs_t pdf_colorspace;
uint16 pdf_colorspace_invert;
uint16 pdf_switchdecode;
uint16 pdf_palettesize;
unsigned char* pdf_palette;
int pdf_labrange[4];
t2p_compress_t pdf_defaultcompression;
uint16 pdf_defaultcompressionquality;
t2p_compress_t pdf_compression;
uint16 pdf_compressionquality;
uint16 pdf_nopassthrough;
t2p_transcode_t pdf_transcode;
t2p_sample_t pdf_sample;
uint32* pdf_xrefoffsets;
uint32 pdf_xrefcount;
tdir_t pdf_page;
#ifdef OJPEG_SUPPORT
tdata_t pdf_ojpegdata;
uint32 pdf_ojpegdatalength;
uint32 pdf_ojpegiflength;
#endif
float tiff_whitechromaticities[2];
float tiff_primarychromaticities[6];
float tiff_referenceblackwhite[2];
float* tiff_transferfunction[3];
int pdf_image_interpolate; /* 0 (default) : do not interpolate,
1 : interpolate */
uint16 tiff_transferfunctioncount;
uint32 pdf_icccs;
uint32 tiff_iccprofilelength;
tdata_t tiff_iccprofile;
/* fields for custom read/write procedures */
FILE *outputfile;
int outputdisable;
tsize_t outputwritten;
} T2P;
/* These functions are called by main. */
void tiff2pdf_usage(void);
int tiff2pdf_match_paper_size(float*, float*, char*);
/* These functions are used to generate a PDF from a TIFF. */
#ifdef __cplusplus
extern "C" {
#endif
T2P* t2p_init(void);
void t2p_validate(T2P*);
tsize_t t2p_write_pdf(T2P*, TIFF*, TIFF*);
void t2p_free(T2P*);
#ifdef __cplusplus
}
#endif
void t2p_read_tiff_init(T2P*, TIFF*);
int t2p_cmp_t2p_page(const void*, const void*);
void t2p_read_tiff_data(T2P*, TIFF*);
void t2p_read_tiff_size(T2P*, TIFF*);
void t2p_read_tiff_size_tile(T2P*, TIFF*, ttile_t);
int t2p_tile_is_right_edge(T2P_TILES, ttile_t);
int t2p_tile_is_bottom_edge(T2P_TILES, ttile_t);
int t2p_tile_is_edge(T2P_TILES, ttile_t);
int t2p_tile_is_corner_edge(T2P_TILES, ttile_t);
tsize_t t2p_readwrite_pdf_image(T2P*, TIFF*, TIFF*);
tsize_t t2p_readwrite_pdf_image_tile(T2P*, TIFF*, TIFF*, ttile_t);
#ifdef OJPEG_SUPPORT
int t2p_process_ojpeg_tables(T2P*, TIFF*);
#endif
#ifdef JPEG_SUPPORT
int t2p_process_jpeg_strip(unsigned char*, tsize_t*, unsigned char*, tsize_t, tsize_t*, tstrip_t, uint32);
#endif
void t2p_tile_collapse_left(tdata_t, tsize_t, uint32, uint32, uint32);
void t2p_write_advance_directory(T2P*, TIFF*);
tsize_t t2p_sample_planar_separate_to_contig(T2P*, unsigned char*, unsigned char*, tsize_t);
tsize_t t2p_sample_realize_palette(T2P*, unsigned char*);
tsize_t t2p_sample_abgr_to_rgb(tdata_t, uint32);
tsize_t t2p_sample_rgba_to_rgb(tdata_t, uint32);
tsize_t t2p_sample_rgbaa_to_rgb(tdata_t, uint32);
tsize_t t2p_sample_lab_signed_to_unsigned(tdata_t, uint32);
tsize_t t2p_write_pdf_header(T2P*, TIFF*);
tsize_t t2p_write_pdf_obj_start(uint32, TIFF*);
tsize_t t2p_write_pdf_obj_end(TIFF*);
tsize_t t2p_write_pdf_name(unsigned char*, TIFF*);
tsize_t t2p_write_pdf_string(char*, TIFF*);
tsize_t t2p_write_pdf_stream(tdata_t, tsize_t, TIFF*);
tsize_t t2p_write_pdf_stream_start(TIFF*);
tsize_t t2p_write_pdf_stream_end(TIFF*);
tsize_t t2p_write_pdf_stream_dict(tsize_t, uint32, TIFF*);
tsize_t t2p_write_pdf_stream_dict_start(TIFF*);
tsize_t t2p_write_pdf_stream_dict_end(TIFF*);
tsize_t t2p_write_pdf_stream_length(tsize_t, TIFF*);
tsize_t t2p_write_pdf_catalog(T2P*, TIFF*);
tsize_t t2p_write_pdf_info(T2P*, TIFF*, TIFF*);
void t2p_pdf_currenttime(T2P*);
void t2p_pdf_tifftime(T2P*, TIFF*);
tsize_t t2p_write_pdf_pages(T2P*, TIFF*);
tsize_t t2p_write_pdf_page(uint32, T2P*, TIFF*);
void t2p_compose_pdf_page(T2P*);
void t2p_compose_pdf_page_orient(T2P_BOX*, uint16);
void t2p_compose_pdf_page_orient_flip(T2P_BOX*, uint16);
tsize_t t2p_write_pdf_page_content(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_stream_dict(ttile_t, T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_cs(T2P*, TIFF*);
tsize_t t2p_write_pdf_transfer(T2P*, TIFF*);
tsize_t t2p_write_pdf_transfer_dict(T2P*, TIFF*, uint16);
tsize_t t2p_write_pdf_transfer_stream(T2P*, TIFF*, uint16);
tsize_t t2p_write_pdf_xobject_calcs(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_icccs(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_icccs_dict(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_icccs_stream(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_cs_stream(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_decode(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_stream_filter(ttile_t, T2P*, TIFF*);
tsize_t t2p_write_pdf_xreftable(T2P*, TIFF*);
tsize_t t2p_write_pdf_trailer(T2P*, TIFF*);
#define check_snprintf_ret(t2p, rv, buf) do { \
if ((rv) < 0) rv = 0; \
else if((rv) >= (int)sizeof(buf)) (rv) = sizeof(buf) - 1; \
else break; \
if ((t2p) != NULL) (t2p)->t2p_error = T2P_ERR_ERROR; \
} while(0)
static void
t2p_disable(TIFF *tif)
{
T2P *t2p = (T2P*) TIFFClientdata(tif);
t2p->outputdisable = 1;
}
static void
t2p_enable(TIFF *tif)
{
T2P *t2p = (T2P*) TIFFClientdata(tif);
t2p->outputdisable = 0;
}
/*
* Procs for TIFFClientOpen
*/
#ifdef OJPEG_SUPPORT
static tmsize_t
t2pReadFile(TIFF *tif, tdata_t data, tmsize_t size)
{
thandle_t client = TIFFClientdata(tif);
TIFFReadWriteProc proc = TIFFGetReadProc(tif);
if (proc)
return proc(client, data, size);
return -1;
}
#endif /* OJPEG_SUPPORT */
static tmsize_t
t2pWriteFile(TIFF *tif, tdata_t data, tmsize_t size)
{
thandle_t client = TIFFClientdata(tif);
TIFFReadWriteProc proc = TIFFGetWriteProc(tif);
if (proc)
return proc(client, data, size);
return -1;
}
static uint64
t2pSeekFile(TIFF *tif, toff_t offset, int whence)
{
thandle_t client = TIFFClientdata(tif);
TIFFSeekProc proc = TIFFGetSeekProc(tif);
if (proc)
return proc(client, offset, whence);
return -1;
}
static tmsize_t
t2p_readproc(thandle_t handle, tdata_t data, tmsize_t size)
{
(void) handle, (void) data, (void) size;
return -1;
}
static tmsize_t
t2p_writeproc(thandle_t handle, tdata_t data, tmsize_t size)
{
T2P *t2p = (T2P*) handle;
if (t2p->outputdisable <= 0 && t2p->outputfile) {
tsize_t written = fwrite(data, 1, size, t2p->outputfile);
t2p->outputwritten += written;
return written;
}
return size;
}
static uint64
t2p_seekproc(thandle_t handle, uint64 offset, int whence)
{
T2P *t2p = (T2P*) handle;
if (t2p->outputdisable <= 0 && t2p->outputfile)
return _TIFF_fseek_f(t2p->outputfile, (_TIFF_off_t) offset, whence);
return offset;
}
static int
t2p_closeproc(thandle_t handle)
{
T2P *t2p = (T2P*) handle;
return fclose(t2p->outputfile);
}
static uint64
t2p_sizeproc(thandle_t handle)
{
(void) handle;
return -1;
}
static int
t2p_mapproc(thandle_t handle, void **data, toff_t *offset)
{
(void) handle, (void) data, (void) offset;
return -1;
}
static void
t2p_unmapproc(thandle_t handle, void *data, toff_t offset)
{
(void) handle, (void) data, (void) offset;
}
#if defined(OJPEG_SUPPORT) || defined(JPEG_SUPPORT)
static uint64
checkAdd64(uint64 summand1, uint64 summand2, T2P* t2p)
{
uint64 bytes = summand1 + summand2;
if (bytes < summand1) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
bytes = 0;
}
return bytes;
}
#endif /* defined(OJPEG_SUPPORT) || defined(JPEG_SUPPORT) */
static uint64
checkMultiply64(uint64 first, uint64 second, T2P* t2p)
{
uint64 bytes = first * second;
if (second && bytes / second != first) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
bytes = 0;
}
return bytes;
}
/*
This is the main function.
The program converts one TIFF file to one PDF file, including multiple page
TIFF files, tiled TIFF files, black and white. grayscale, and color TIFF
files that contain data of TIFF photometric interpretations of bilevel,
grayscale, RGB, YCbCr, CMYK separation, and ICC L*a*b* as supported by
libtiff and PDF.
If you have multiple TIFF files to convert into one PDF file then use tiffcp
or other program to concatenate the files into a multiple page TIFF file.
If the input TIFF file is of huge dimensions (greater than 10000 pixels height
or width) convert the input image to a tiled TIFF if it is not already.
The standard output is standard output. Set the output file name with the
"-o output.pdf" option.
All black and white files are compressed into a single strip CCITT G4 Fax
compressed PDF, unless tiled, where tiled black and white images are
compressed into tiled CCITT G4 Fax compressed PDF, libtiff CCITT support
is assumed.
Color and grayscale data can be compressed using either JPEG compression,
ITU-T T.81, or Zip/Deflate LZ77 compression, per PNG 1.2 and RFC 1951. Set
the compression type using the -j or -z options. JPEG compression support
requires that libtiff be configured with JPEG support, and Zip/Deflate
compression support requires that libtiff is configured with Zip support,
in tiffconf.h. Use only one or the other of -j and -z. The -q option
sets the image compression quality, that is 1-100 with libjpeg JPEG
compression and one of 1, 10, 11, 12, 13, 14, or 15 for PNG group compression
predictor methods, add 100, 200, ..., 900 to set zlib compression quality 1-9.
PNG Group differencing predictor methods are not currently implemented.
If the input TIFF contains single strip CCITT G4 Fax compressed information,
then that is written to the PDF file without transcoding, unless the options
of no compression and no passthrough are set, -d and -n.
If the input TIFF contains JPEG or single strip Zip/Deflate compressed
information, and they are configured, then that is written to the PDF file
without transcoding, unless the options of no compression and no passthrough
are set.
The default page size upon which the TIFF image is placed is determined by
the resolution and extent of the image data. Default values for the TIFF
image resolution can be set using the -x and -y options. The page size can
be set using the -p option for paper size, or -w and -l for paper width and
length, then each page of the TIFF image is centered on its page. The
distance unit for default resolution and page width and length can be set
by the -u option, the default unit is inch.
Various items of the output document information can be set with the -e, -c,
-a, -t, -s, and -k tags. Setting the argument of the option to "" for these
tags causes the relevant document information field to be not written. Some
of the document information values otherwise get their information from the
input TIFF image, the software, author, document name, and image description.
The output PDF file conforms to the PDF 1.1 specification or PDF 1.2 if using
Zip/Deflate compression.
The Portable Document Format (PDF) specification is copyrighted by Adobe
Systems, Incorporated. Todos derechos reservados.
Here is a listing of the usage example and the options to the tiff2pdf
program that is part of the libtiff distribution. Options followed by
a colon have a required argument.
usage: tiff2pdf [options] input.tif
options:
-o: output to file name
-j: compress with JPEG (requires libjpeg configured with libtiff)
-z: compress with Zip/Deflate (requires zlib configured with libtiff)
-q: compression quality
-n: no compressed data passthrough
-d: do not compress (decompress)
-i: invert colors
-u: set distance unit, 'i' for inch, 'm' for centimeter
-x: set x resolution default
-y: set y resolution default
-w: width in units
-l: length in units
-r: 'd' for resolution default, 'o' for resolution override
-p: paper size, eg "letter", "legal", "a4"
-F: make the tiff fill the PDF page
-f: set pdf "fit window" user preference
-b: set PDF "Interpolate" user preference
-e: date, overrides image or current date/time default, YYYYMMDDHHMMSS
-c: creator, overrides image software default
-a: author, overrides image artist default
-t: title, overrides image document name default
-s: subject, overrides image image description default
-k: keywords
-h: usage
examples:
tiff2pdf -o output.pdf input.tiff
The above example would generate the file output.pdf from input.tiff.
tiff2pdf input.tiff
The above example would generate PDF output from input.tiff and write it
to standard output.
tiff2pdf -j -p letter -o output.pdf input.tiff
The above example would generate the file output.pdf from input.tiff,
putting the image pages on a letter sized page, compressing the output
with JPEG.
Please report bugs through:
http://bugzilla.remotesensing.org/buglist.cgi?product=libtiff
See also libtiff.3t, tiffcp.
*/
int main(int argc, char** argv){
#if !HAVE_DECL_OPTARG
extern char *optarg;
extern int optind;
#endif
const char *outfilename = NULL;
T2P *t2p = NULL;
TIFF *input = NULL, *output = NULL;
int c, ret = EXIT_SUCCESS;
t2p = t2p_init();
if (t2p == NULL){
TIFFError(TIFF2PDF_MODULE, "Can't initialize context");
goto fail;
}
while (argv &&
(c = getopt(argc, argv,
"o:q:u:x:y:w:l:r:p:e:c:a:t:s:k:jzndifbhF")) != -1){
switch (c) {
case 'o':
outfilename = optarg;
break;
#ifdef JPEG_SUPPORT
case 'j':
t2p->pdf_defaultcompression=T2P_COMPRESS_JPEG;
break;
#endif
#ifndef JPEG_SUPPORT
case 'j':
TIFFWarning(
TIFF2PDF_MODULE,
"JPEG support in libtiff required for JPEG compression, ignoring option");
break;
#endif
#ifdef ZIP_SUPPORT
case 'z':
t2p->pdf_defaultcompression=T2P_COMPRESS_ZIP;
break;
#endif
#ifndef ZIP_SUPPORT
case 'z':
TIFFWarning(
TIFF2PDF_MODULE,
"Zip support in libtiff required for Zip compression, ignoring option");
break;
#endif
case 'q':
t2p->pdf_defaultcompressionquality=atoi(optarg);
break;
case 'n':
t2p->pdf_nopassthrough=1;
break;
case 'd':
t2p->pdf_defaultcompression=T2P_COMPRESS_NONE;
break;
case 'u':
if(optarg[0]=='m'){
t2p->pdf_centimeters=1;
}
break;
case 'x':
t2p->pdf_defaultxres =
(float)atof(optarg) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'y':
t2p->pdf_defaultyres =
(float)atof(optarg) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'w':
t2p->pdf_overridepagesize=1;
t2p->pdf_defaultpagewidth =
((float)atof(optarg) * PS_UNIT_SIZE) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'l':
t2p->pdf_overridepagesize=1;
t2p->pdf_defaultpagelength =
((float)atof(optarg) * PS_UNIT_SIZE) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'r':
if(optarg[0]=='o'){
t2p->pdf_overrideres=1;
}
break;
case 'p':
if(tiff2pdf_match_paper_size(
&(t2p->pdf_defaultpagewidth),
&(t2p->pdf_defaultpagelength),
optarg)){
t2p->pdf_overridepagesize=1;
} else {
TIFFWarning(TIFF2PDF_MODULE,
"Unknown paper size %s, ignoring option",
optarg);
}
break;
case 'i':
t2p->pdf_colorspace_invert=1;
break;
case 'F':
t2p->pdf_image_fillpage = 1;
break;
case 'f':
t2p->pdf_fitwindow=1;
break;
case 'e':
if (strlen(optarg) == 0) {
t2p->pdf_datetime[0] = '\0';
} else {
t2p->pdf_datetime[0] = 'D';
t2p->pdf_datetime[1] = ':';
strncpy(t2p->pdf_datetime + 2, optarg,
sizeof(t2p->pdf_datetime) - 3);
t2p->pdf_datetime[sizeof(t2p->pdf_datetime) - 1] = '\0';
}
break;
case 'c':
strncpy(t2p->pdf_creator, optarg, sizeof(t2p->pdf_creator) - 1);
t2p->pdf_creator[sizeof(t2p->pdf_creator) - 1] = '\0';
break;
case 'a':
strncpy(t2p->pdf_author, optarg, sizeof(t2p->pdf_author) - 1);
t2p->pdf_author[sizeof(t2p->pdf_author) - 1] = '\0';
break;
case 't':
strncpy(t2p->pdf_title, optarg, sizeof(t2p->pdf_title) - 1);
t2p->pdf_title[sizeof(t2p->pdf_title) - 1] = '\0';
break;
case 's':
strncpy(t2p->pdf_subject, optarg, sizeof(t2p->pdf_subject) - 1);
t2p->pdf_subject[sizeof(t2p->pdf_subject) - 1] = '\0';
break;
case 'k':
strncpy(t2p->pdf_keywords, optarg, sizeof(t2p->pdf_keywords) - 1);
t2p->pdf_keywords[sizeof(t2p->pdf_keywords) - 1] = '\0';
break;
case 'b':
t2p->pdf_image_interpolate = 1;
break;
case 'h':
case '?':
tiff2pdf_usage();
goto success;
break;
}
}
/*
* Input
*/
if(argc > optind) {
input = TIFFOpen(argv[optind++], "r");
if (input==NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't open input file %s for reading",
argv[optind-1]);
goto fail;
}
} else {
TIFFError(TIFF2PDF_MODULE, "No input file specified");
tiff2pdf_usage();
goto fail;
}
if(argc > optind) {
TIFFError(TIFF2PDF_MODULE,
"No support for multiple input files");
tiff2pdf_usage();
goto fail;
}
/*
* Output
*/
t2p->outputdisable = 1;
if (outfilename) {
t2p->outputfile = fopen(outfilename, "wb");
if (t2p->outputfile == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't open output file %s for writing",
outfilename);
goto fail;
}
} else {
outfilename = "-";
t2p->outputfile = stdout;
}
output = TIFFClientOpen(outfilename, "w", (thandle_t) t2p,
t2p_readproc, t2p_writeproc, t2p_seekproc,
t2p_closeproc, t2p_sizeproc,
t2p_mapproc, t2p_unmapproc);
t2p->outputdisable = 0;
if (output == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't initialize output descriptor");
goto fail;
}
/*
* Validate
*/
t2p_validate(t2p);
t2pSeekFile(output, (toff_t) 0, SEEK_SET);
/*
* Write
*/
t2p_write_pdf(t2p, input, output);
if (t2p->t2p_error != 0) {
TIFFError(TIFF2PDF_MODULE,
"An error occurred creating output PDF file");
goto fail;
}
goto success;
fail:
ret = EXIT_FAILURE;
success:
if(input != NULL)
TIFFClose(input);
if (output != NULL)
TIFFClose(output);
if (t2p != NULL)
t2p_free(t2p);
return ret;
}
void tiff2pdf_usage(){
char* lines[]={
"usage: tiff2pdf [options] input.tiff",
"options:",
" -o: output to file name",
#ifdef JPEG_SUPPORT
" -j: compress with JPEG",
#endif
#ifdef ZIP_SUPPORT
" -z: compress with Zip/Deflate",
#endif
" -q: compression quality",
" -n: no compressed data passthrough",
" -d: do not compress (decompress)",
" -i: invert colors",
" -u: set distance unit, 'i' for inch, 'm' for centimeter",
" -x: set x resolution default in dots per unit",
" -y: set y resolution default in dots per unit",
" -w: width in units",
" -l: length in units",
" -r: 'd' for resolution default, 'o' for resolution override",
" -p: paper size, eg \"letter\", \"legal\", \"A4\"",
" -F: make the tiff fill the PDF page",
" -f: set PDF \"Fit Window\" user preference",
" -e: date, overrides image or current date/time default, YYYYMMDDHHMMSS",
" -c: sets document creator, overrides image software default",
" -a: sets document author, overrides image artist default",
" -t: sets document title, overrides image document name default",
" -s: sets document subject, overrides image image description default",
" -k: sets document keywords",
" -b: set PDF \"Interpolate\" user preference",
" -h: usage",
NULL
};
int i=0;
fprintf(stderr, "%s\n\n", TIFFGetVersion());
for (i=0;lines[i]!=NULL;i++){
fprintf(stderr, "%s\n", lines[i]);
}
return;
}
int tiff2pdf_match_paper_size(float* width, float* length, char* papersize){
size_t i, len;
const char* sizes[]={
"LETTER", "A4", "LEGAL",
"EXECUTIVE", "LETTER", "LEGAL", "LEDGER", "TABLOID",
"A", "B", "C", "D", "E", "F", "G", "H", "J", "K",
"A10", "A9", "A8", "A7", "A6", "A5", "A4", "A3", "A2", "A1", "A0",
"2A0", "4A0", "2A", "4A",
"B10", "B9", "B8", "B7", "B6", "B5", "B4", "B3", "B2", "B1", "B0",
"JISB10", "JISB9", "JISB8", "JISB7", "JISB6", "JISB5", "JISB4",
"JISB3", "JISB2", "JISB1", "JISB0",
"C10", "C9", "C8", "C7", "C6", "C5", "C4", "C3", "C2", "C1", "C0",
"RA2", "RA1", "RA0", "SRA4", "SRA3", "SRA2", "SRA1", "SRA0",
"A3EXTRA", "A4EXTRA",
"STATEMENT", "FOLIO", "QUARTO",
NULL
} ;
const int widths[]={
612, 595, 612,
522, 612,612,792,792,
612,792,1224,1584,2448,2016,792,2016,2448,2880,
74,105,147,210,298,420,595,842,1191,1684,2384,3370,4768,3370,4768,
88,125,176,249,354,499,709,1001,1417,2004,2835,
91,128,181,258,363,516,729,1032,1460,2064,2920,
79,113,162,230,323,459,649,918,1298,1298,2599,
1219,1729,2438,638,907,1276,1814,2551,
914,667,
396, 612, 609,
0
};
const int lengths[]={
792,842,1008,
756,792,1008,1224,1224,
792,1224,1584,2448,3168,2880,6480,10296,12672,10296,
105,147,210,298,420,595,842,1191,1684,2384,3370,4768,6741,4768,6741,
125,176,249,354,499,709,1001,1417,2004,2835,4008,
128,181,258,363,516,729,1032,1460,2064,2920,4127,
113,162,230,323,459,649,918,1298,1837,1837,3677,
1729,2438,3458,907,1276,1814,2551,3628,
1262,914,
612, 936, 780,
0
};
len=strlen(papersize);
for(i=0;i<len;i++){
papersize[i]=toupper((int) papersize[i]);
}
for(i=0;sizes[i]!=NULL; i++){
if (strcmp( (const char*)papersize, sizes[i])==0){
*width=(float)widths[i];
*length=(float)lengths[i];
return(1);
}
}
return(0);
}
/*
* This function allocates and initializes a T2P context struct pointer.
*/
T2P* t2p_init()
{
T2P* t2p = (T2P*) _TIFFmalloc(sizeof(T2P));
if(t2p==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_init",
(unsigned long) sizeof(T2P));
return( (T2P*) NULL );
}
_TIFFmemset(t2p, 0x00, sizeof(T2P));
t2p->pdf_majorversion=1;
t2p->pdf_minorversion=1;
t2p->pdf_defaultxres=300.0;
t2p->pdf_defaultyres=300.0;
t2p->pdf_defaultpagewidth=612.0;
t2p->pdf_defaultpagelength=792.0;
t2p->pdf_xrefcount=3; /* Catalog, Info, Pages */
return(t2p);
}
/*
* This function frees a T2P context struct pointer and any allocated data fields of it.
*/
void t2p_free(T2P* t2p)
{
int i = 0;
if (t2p != NULL) {
if(t2p->pdf_xrefoffsets != NULL){
_TIFFfree( (tdata_t) t2p->pdf_xrefoffsets);
}
if(t2p->tiff_pages != NULL){
_TIFFfree( (tdata_t) t2p->tiff_pages);
}
for(i=0;i<t2p->tiff_pagecount;i++){
if(t2p->tiff_tiles[i].tiles_tiles != NULL){
_TIFFfree( (tdata_t) t2p->tiff_tiles[i].tiles_tiles);
}
}
if(t2p->tiff_tiles != NULL){
_TIFFfree( (tdata_t) t2p->tiff_tiles);
}
if(t2p->pdf_palette != NULL){
_TIFFfree( (tdata_t) t2p->pdf_palette);
}
#ifdef OJPEG_SUPPORT
if(t2p->pdf_ojpegdata != NULL){
_TIFFfree( (tdata_t) t2p->pdf_ojpegdata);
}
#endif
_TIFFfree( (tdata_t) t2p );
}
return;
}
/*
This function validates the values of a T2P context struct pointer
before calling t2p_write_pdf with it.
*/
void t2p_validate(T2P* t2p){
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
if(t2p->pdf_defaultcompressionquality>100 ||
t2p->pdf_defaultcompressionquality<1){
t2p->pdf_defaultcompressionquality=0;
}
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_ZIP){
uint16 m=t2p->pdf_defaultcompressionquality%100;
if(t2p->pdf_defaultcompressionquality/100 > 9 ||
(m>1 && m<10) || m>15){
t2p->pdf_defaultcompressionquality=0;
}
if(t2p->pdf_defaultcompressionquality%100 !=0){
t2p->pdf_defaultcompressionquality/=100;
t2p->pdf_defaultcompressionquality*=100;
TIFFError(
TIFF2PDF_MODULE,
"PNG Group predictor differencing not implemented, assuming compression quality %u",
t2p->pdf_defaultcompressionquality);
}
t2p->pdf_defaultcompressionquality%=100;
if(t2p->pdf_minorversion<2){t2p->pdf_minorversion=2;}
}
#endif
(void)0;
return;
}
/*
This function scans the input TIFF file for pages. It attempts
to determine which IFD's of the TIFF file contain image document
pages. For each, it gathers some information that has to do
with the output of the PDF document as a whole.
*/
void t2p_read_tiff_init(T2P* t2p, TIFF* input){
tdir_t directorycount=0;
tdir_t i=0;
uint16 pagen=0;
uint16 paged=0;
uint16 xuint16=0;
directorycount=TIFFNumberOfDirectories(input);
t2p->tiff_pages = (T2P_PAGE*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,directorycount,sizeof(T2P_PAGE)));
if(t2p->tiff_pages==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for tiff_pages array, %s",
(TIFF_SIZE_T) directorycount * sizeof(T2P_PAGE),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
_TIFFmemset( t2p->tiff_pages, 0x00, directorycount * sizeof(T2P_PAGE));
t2p->tiff_tiles = (T2P_TILES*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,directorycount,sizeof(T2P_TILES)));
if(t2p->tiff_tiles==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for tiff_tiles array, %s",
(TIFF_SIZE_T) directorycount * sizeof(T2P_TILES),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
_TIFFmemset( t2p->tiff_tiles, 0x00, directorycount * sizeof(T2P_TILES));
for(i=0;i<directorycount;i++){
uint32 subfiletype = 0;
if(!TIFFSetDirectory(input, i)){
TIFFError(
TIFF2PDF_MODULE,
"Can't set directory %u of input file %s",
i,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_PAGENUMBER, &pagen, &paged)){
if((pagen>paged) && (paged != 0)){
t2p->tiff_pages[t2p->tiff_pagecount].page_number =
paged;
} else {
t2p->tiff_pages[t2p->tiff_pagecount].page_number =
pagen;
}
goto ispage2;
}
if(TIFFGetField(input, TIFFTAG_SUBFILETYPE, &subfiletype)){
if ( ((subfiletype & FILETYPE_PAGE) != 0)
|| (subfiletype == 0)){
goto ispage;
} else {
goto isnotpage;
}
}
if(TIFFGetField(input, TIFFTAG_OSUBFILETYPE, &subfiletype)){
if ((subfiletype == OFILETYPE_IMAGE)
|| (subfiletype == OFILETYPE_PAGE)
|| (subfiletype == 0) ){
goto ispage;
} else {
goto isnotpage;
}
}
ispage:
t2p->tiff_pages[t2p->tiff_pagecount].page_number=t2p->tiff_pagecount;
ispage2:
t2p->tiff_pages[t2p->tiff_pagecount].page_directory=i;
if(TIFFIsTiled(input)){
t2p->tiff_pages[t2p->tiff_pagecount].page_tilecount =
TIFFNumberOfTiles(input);
}
t2p->tiff_pagecount++;
isnotpage:
(void)0;
}
qsort((void*) t2p->tiff_pages, t2p->tiff_pagecount,
sizeof(T2P_PAGE), t2p_cmp_t2p_page);
for(i=0;i<t2p->tiff_pagecount;i++){
t2p->pdf_xrefcount += 5;
TIFFSetDirectory(input, t2p->tiff_pages[i].page_directory );
if((TIFFGetField(input, TIFFTAG_PHOTOMETRIC, &xuint16)
&& (xuint16==PHOTOMETRIC_PALETTE))
|| TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)) {
t2p->tiff_pages[i].page_extra++;
t2p->pdf_xrefcount++;
}
#ifdef ZIP_SUPPORT
if (TIFFGetField(input, TIFFTAG_COMPRESSION, &xuint16)) {
if( (xuint16== COMPRESSION_DEFLATE ||
xuint16== COMPRESSION_ADOBE_DEFLATE) &&
((t2p->tiff_pages[i].page_tilecount != 0)
|| TIFFNumberOfStrips(input)==1) &&
(t2p->pdf_nopassthrough==0) ){
if(t2p->pdf_minorversion<2){t2p->pdf_minorversion=2;}
}
}
#endif
if (TIFFGetField(input, TIFFTAG_TRANSFERFUNCTION,
&(t2p->tiff_transferfunction[0]),
&(t2p->tiff_transferfunction[1]),
&(t2p->tiff_transferfunction[2]))) {
if((t2p->tiff_transferfunction[1] != (float*) NULL) &&
(t2p->tiff_transferfunction[2] != (float*) NULL) &&
(t2p->tiff_transferfunction[1] !=
t2p->tiff_transferfunction[0])) {
t2p->tiff_transferfunctioncount = 3;
t2p->tiff_pages[i].page_extra += 4;
t2p->pdf_xrefcount += 4;
} else {
t2p->tiff_transferfunctioncount = 1;
t2p->tiff_pages[i].page_extra += 2;
t2p->pdf_xrefcount += 2;
}
if(t2p->pdf_minorversion < 2)
t2p->pdf_minorversion = 2;
} else {
t2p->tiff_transferfunctioncount=0;
}
if( TIFFGetField(
input,
TIFFTAG_ICCPROFILE,
&(t2p->tiff_iccprofilelength),
&(t2p->tiff_iccprofile)) != 0){
t2p->tiff_pages[i].page_extra++;
t2p->pdf_xrefcount++;
if(t2p->pdf_minorversion<3){t2p->pdf_minorversion=3;}
}
t2p->tiff_tiles[i].tiles_tilecount=
t2p->tiff_pages[i].page_tilecount;
if( (TIFFGetField(input, TIFFTAG_PLANARCONFIG, &xuint16) != 0)
&& (xuint16 == PLANARCONFIG_SEPARATE ) ){
if( !TIFFGetField(input, TIFFTAG_SAMPLESPERPIXEL, &xuint16) )
{
TIFFError(
TIFF2PDF_MODULE,
"Missing SamplesPerPixel, %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( (t2p->tiff_tiles[i].tiles_tilecount % xuint16) != 0 )
{
TIFFError(
TIFF2PDF_MODULE,
"Invalid tile count, %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->tiff_tiles[i].tiles_tilecount/= xuint16;
}
if( t2p->tiff_tiles[i].tiles_tilecount > 0){
t2p->pdf_xrefcount +=
(t2p->tiff_tiles[i].tiles_tilecount -1)*2;
TIFFGetField(input,
TIFFTAG_TILEWIDTH,
&( t2p->tiff_tiles[i].tiles_tilewidth) );
TIFFGetField(input,
TIFFTAG_TILELENGTH,
&( t2p->tiff_tiles[i].tiles_tilelength) );
t2p->tiff_tiles[i].tiles_tiles =
(T2P_TILE*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->tiff_tiles[i].tiles_tilecount,
sizeof(T2P_TILE)) );
if( t2p->tiff_tiles[i].tiles_tiles == NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for t2p_read_tiff_init, %s",
(TIFF_SIZE_T) t2p->tiff_tiles[i].tiles_tilecount * sizeof(T2P_TILE),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
}
return;
}
/*
* This function is used by qsort to sort a T2P_PAGE* array of page structures
* by page number. If the page numbers are the same, we fall back to comparing
* directory numbers to preserve the order of the input file.
*/
int t2p_cmp_t2p_page(const void* e1, const void* e2){
int d;
d = (int32)(((T2P_PAGE*)e1)->page_number) - (int32)(((T2P_PAGE*)e2)->page_number);
if(d == 0){
d = (int32)(((T2P_PAGE*)e1)->page_directory) - (int32)(((T2P_PAGE*)e2)->page_directory);
}
return d;
}
/*
This function sets the input directory to the directory of a given
page and determines information about the image. It checks
the image characteristics to determine if it is possible to convert
the image data into a page of PDF output, setting values of the T2P
struct for this page. It determines what color space is used in
the output PDF to represent the image.
It determines if the image can be converted as raw data without
requiring transcoding of the image data.
*/
void t2p_read_tiff_data(T2P* t2p, TIFF* input){
int i=0;
uint16* r;
uint16* g;
uint16* b;
uint16* a;
uint16 xuint16;
uint16* xuint16p;
float* xfloatp;
t2p->pdf_transcode = T2P_TRANSCODE_ENCODE;
t2p->pdf_sample = T2P_SAMPLE_NOTHING;
t2p->pdf_switchdecode = t2p->pdf_colorspace_invert;
TIFFSetDirectory(input, t2p->tiff_pages[t2p->pdf_page].page_directory);
TIFFGetField(input, TIFFTAG_IMAGEWIDTH, &(t2p->tiff_width));
if(t2p->tiff_width == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with zero width",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetField(input, TIFFTAG_IMAGELENGTH, &(t2p->tiff_length));
if(t2p->tiff_length == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with zero length",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_COMPRESSION, &(t2p->tiff_compression)) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with no compression tag",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( TIFFIsCODECConfigured(t2p->tiff_compression) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with compression type %u: not configured",
TIFFFileName(input),
t2p->tiff_compression
);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetFieldDefaulted(input, TIFFTAG_BITSPERSAMPLE, &(t2p->tiff_bitspersample));
switch(t2p->tiff_bitspersample){
case 1:
case 2:
case 4:
case 8:
break;
case 0:
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has 0 bits per sample, assuming 1",
TIFFFileName(input));
t2p->tiff_bitspersample=1;
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with %u bits per sample",
TIFFFileName(input),
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetFieldDefaulted(input, TIFFTAG_SAMPLESPERPIXEL, &(t2p->tiff_samplesperpixel));
if(t2p->tiff_samplesperpixel>4){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->tiff_samplesperpixel==0){
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has 0 samples per pixel, assuming 1",
TIFFFileName(input));
t2p->tiff_samplesperpixel=1;
}
if(TIFFGetField(input, TIFFTAG_SAMPLEFORMAT, &xuint16) != 0 ){
switch(xuint16){
case 0:
case 1:
case 4:
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with sample format %u",
TIFFFileName(input),
xuint16);
t2p->t2p_error = T2P_ERR_ERROR;
return;
break;
}
}
TIFFGetFieldDefaulted(input, TIFFTAG_FILLORDER, &(t2p->tiff_fillorder));
if(TIFFGetField(input, TIFFTAG_PHOTOMETRIC, &(t2p->tiff_photometric)) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with no photometric interpretation tag",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
switch(t2p->tiff_photometric){
case PHOTOMETRIC_MINISWHITE:
case PHOTOMETRIC_MINISBLACK:
if (t2p->tiff_bitspersample==1){
t2p->pdf_colorspace=T2P_CS_BILEVEL;
if(t2p->tiff_photometric==PHOTOMETRIC_MINISWHITE){
t2p->pdf_switchdecode ^= 1;
}
} else {
t2p->pdf_colorspace=T2P_CS_GRAY;
if(t2p->tiff_photometric==PHOTOMETRIC_MINISWHITE){
t2p->pdf_switchdecode ^= 1;
}
}
break;
case PHOTOMETRIC_RGB:
t2p->pdf_colorspace=T2P_CS_RGB;
if(t2p->tiff_samplesperpixel == 3){
break;
}
if(TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)){
if(xuint16==1)
goto photometric_palette;
}
if(t2p->tiff_samplesperpixel > 3) {
if(t2p->tiff_samplesperpixel == 4) {
t2p->pdf_colorspace = T2P_CS_RGB;
if(TIFFGetField(input,
TIFFTAG_EXTRASAMPLES,
&xuint16, &xuint16p)
&& xuint16 == 1) {
if(xuint16p[0] == EXTRASAMPLE_ASSOCALPHA){
if( t2p->tiff_bitspersample != 8 )
{
TIFFError(
TIFF2PDF_MODULE,
"No support for BitsPerSample=%d for RGBA",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_sample=T2P_SAMPLE_RGBAA_TO_RGB;
break;
}
if(xuint16p[0] == EXTRASAMPLE_UNASSALPHA){
if( t2p->tiff_bitspersample != 8 )
{
TIFFError(
TIFF2PDF_MODULE,
"No support for BitsPerSample=%d for RGBA",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_sample=T2P_SAMPLE_RGBA_TO_RGB;
break;
}
TIFFWarning(
TIFF2PDF_MODULE,
"RGB image %s has 4 samples per pixel, assuming RGBA",
TIFFFileName(input));
break;
}
t2p->pdf_colorspace=T2P_CS_CMYK;
t2p->pdf_switchdecode ^= 1;
TIFFWarning(
TIFF2PDF_MODULE,
"RGB image %s has 4 samples per pixel, assuming inverse CMYK",
TIFFFileName(input));
break;
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for RGB image %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
break;
}
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for RGB image %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
break;
}
case PHOTOMETRIC_PALETTE:
photometric_palette:
if(t2p->tiff_samplesperpixel!=1){
TIFFError(
TIFF2PDF_MODULE,
"No support for palettized image %s with not one sample per pixel",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_colorspace=T2P_CS_RGB | T2P_CS_PALETTE;
t2p->pdf_palettesize=0x0001<<t2p->tiff_bitspersample;
if(!TIFFGetField(input, TIFFTAG_COLORMAP, &r, &g, &b)){
TIFFError(
TIFF2PDF_MODULE,
"Palettized image %s has no color map",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->pdf_palette != NULL){
_TIFFfree(t2p->pdf_palette);
t2p->pdf_palette=NULL;
}
t2p->pdf_palette = (unsigned char*)
_TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_palettesize,3));
if(t2p->pdf_palette==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_read_tiff_image, %s",
t2p->pdf_palettesize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<t2p->pdf_palettesize;i++){
t2p->pdf_palette[(i*3)] = (unsigned char) (r[i]>>8);
t2p->pdf_palette[(i*3)+1]= (unsigned char) (g[i]>>8);
t2p->pdf_palette[(i*3)+2]= (unsigned char) (b[i]>>8);
}
t2p->pdf_palettesize *= 3;
break;
case PHOTOMETRIC_SEPARATED:
if(TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)){
if(xuint16==1){
goto photometric_palette_cmyk;
}
}
if( TIFFGetField(input, TIFFTAG_INKSET, &xuint16) ){
if(xuint16 != INKSET_CMYK){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s because its inkset is not CMYK",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
if(t2p->tiff_samplesperpixel==4){
t2p->pdf_colorspace=T2P_CS_CMYK;
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for %s because it has %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
break;
photometric_palette_cmyk:
if(t2p->tiff_samplesperpixel!=1){
TIFFError(
TIFF2PDF_MODULE,
"No support for palettized CMYK image %s with not one sample per pixel",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_colorspace=T2P_CS_CMYK | T2P_CS_PALETTE;
t2p->pdf_palettesize=0x0001<<t2p->tiff_bitspersample;
if(!TIFFGetField(input, TIFFTAG_COLORMAP, &r, &g, &b, &a)){
TIFFError(
TIFF2PDF_MODULE,
"Palettized image %s has no color map",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->pdf_palette != NULL){
_TIFFfree(t2p->pdf_palette);
t2p->pdf_palette=NULL;
}
t2p->pdf_palette = (unsigned char*)
_TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_palettesize,4));
if(t2p->pdf_palette==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_read_tiff_image, %s",
t2p->pdf_palettesize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<t2p->pdf_palettesize;i++){
t2p->pdf_palette[(i*4)] = (unsigned char) (r[i]>>8);
t2p->pdf_palette[(i*4)+1]= (unsigned char) (g[i]>>8);
t2p->pdf_palette[(i*4)+2]= (unsigned char) (b[i]>>8);
t2p->pdf_palette[(i*4)+3]= (unsigned char) (a[i]>>8);
}
t2p->pdf_palettesize *= 4;
break;
case PHOTOMETRIC_YCBCR:
t2p->pdf_colorspace=T2P_CS_RGB;
if(t2p->tiff_samplesperpixel==1){
t2p->pdf_colorspace=T2P_CS_GRAY;
t2p->tiff_photometric=PHOTOMETRIC_MINISBLACK;
break;
}
t2p->pdf_sample=T2P_SAMPLE_YCBCR_TO_RGB;
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
t2p->pdf_sample=T2P_SAMPLE_NOTHING;
}
#endif
break;
case PHOTOMETRIC_CIELAB:
if( t2p->tiff_samplesperpixel != 3){
TIFFError(
TIFF2PDF_MODULE,
"Unsupported samplesperpixel = %d for CIELAB",
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( t2p->tiff_bitspersample != 8){
TIFFError(
TIFF2PDF_MODULE,
"Invalid bitspersample = %d for CIELAB",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_labrange[0]= -127;
t2p->pdf_labrange[1]= 127;
t2p->pdf_labrange[2]= -127;
t2p->pdf_labrange[3]= 127;
t2p->pdf_sample=T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_ICCLAB:
t2p->pdf_labrange[0]= 0;
t2p->pdf_labrange[1]= 255;
t2p->pdf_labrange[2]= 0;
t2p->pdf_labrange[3]= 255;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_ITULAB:
if( t2p->tiff_samplesperpixel != 3){
TIFFError(
TIFF2PDF_MODULE,
"Unsupported samplesperpixel = %d for ITULAB",
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( t2p->tiff_bitspersample != 8){
TIFFError(
TIFF2PDF_MODULE,
"Invalid bitspersample = %d for ITULAB",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_labrange[0]=-85;
t2p->pdf_labrange[1]=85;
t2p->pdf_labrange[2]=-75;
t2p->pdf_labrange[3]=124;
t2p->pdf_sample=T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_LOGL:
case PHOTOMETRIC_LOGLUV:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with photometric interpretation LogL/LogLuv",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with photometric interpretation %u",
TIFFFileName(input),
t2p->tiff_photometric);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_PLANARCONFIG, &(t2p->tiff_planar))){
switch(t2p->tiff_planar){
case 0:
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has planar configuration 0, assuming 1",
TIFFFileName(input));
t2p->tiff_planar=PLANARCONFIG_CONTIG;
case PLANARCONFIG_CONTIG:
break;
case PLANARCONFIG_SEPARATE:
t2p->pdf_sample=T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG;
if(t2p->tiff_bitspersample!=8){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with separated planar configuration and %u bits per sample",
TIFFFileName(input),
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with planar configuration %u",
TIFFFileName(input),
t2p->tiff_planar);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
TIFFGetFieldDefaulted(input, TIFFTAG_ORIENTATION,
&(t2p->tiff_orientation));
if(t2p->tiff_orientation>8){
TIFFWarning(TIFF2PDF_MODULE,
"Image %s has orientation %u, assuming 0",
TIFFFileName(input), t2p->tiff_orientation);
t2p->tiff_orientation=0;
}
if(TIFFGetField(input, TIFFTAG_XRESOLUTION, &(t2p->tiff_xres) ) == 0){
t2p->tiff_xres=0.0;
}
if(TIFFGetField(input, TIFFTAG_YRESOLUTION, &(t2p->tiff_yres) ) == 0){
t2p->tiff_yres=0.0;
}
TIFFGetFieldDefaulted(input, TIFFTAG_RESOLUTIONUNIT,
&(t2p->tiff_resunit));
if(t2p->tiff_resunit == RESUNIT_CENTIMETER) {
t2p->tiff_xres *= 2.54F;
t2p->tiff_yres *= 2.54F;
} else if (t2p->tiff_resunit != RESUNIT_INCH
&& t2p->pdf_centimeters != 0) {
t2p->tiff_xres *= 2.54F;
t2p->tiff_yres *= 2.54F;
}
t2p_compose_pdf_page(t2p);
if( t2p->t2p_error == T2P_ERR_ERROR )
return;
t2p->pdf_transcode = T2P_TRANSCODE_ENCODE;
if(t2p->pdf_nopassthrough==0){
#ifdef CCITT_SUPPORT
if(t2p->tiff_compression==COMPRESSION_CCITTFAX4
){
if(TIFFIsTiled(input) || (TIFFNumberOfStrips(input)==1) ){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_G4;
}
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->tiff_compression== COMPRESSION_ADOBE_DEFLATE
|| t2p->tiff_compression==COMPRESSION_DEFLATE){
if(TIFFIsTiled(input) || (TIFFNumberOfStrips(input)==1) ){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_ZIP;
}
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_JPEG;
t2p_process_ojpeg_tables(t2p, input);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_JPEG){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_JPEG;
}
#endif
(void)0;
}
if(t2p->pdf_transcode!=T2P_TRANSCODE_RAW){
t2p->pdf_compression = t2p->pdf_defaultcompression;
}
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
if(t2p->pdf_colorspace & T2P_CS_PALETTE){
t2p->pdf_sample|=T2P_SAMPLE_REALIZE_PALETTE;
t2p->pdf_colorspace ^= T2P_CS_PALETTE;
t2p->tiff_pages[t2p->pdf_page].page_extra--;
}
}
if(t2p->tiff_compression==COMPRESSION_JPEG){
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with JPEG compression and separated planar configuration",
TIFFFileName(input));
t2p->t2p_error=T2P_ERR_ERROR;
return;
}
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with OJPEG compression and separated planar configuration",
TIFFFileName(input));
t2p->t2p_error=T2P_ERR_ERROR;
return;
}
}
#endif
if(t2p->pdf_sample & T2P_SAMPLE_REALIZE_PALETTE){
if(t2p->pdf_colorspace & T2P_CS_CMYK){
t2p->tiff_samplesperpixel=4;
t2p->tiff_photometric=PHOTOMETRIC_SEPARATED;
} else {
t2p->tiff_samplesperpixel=3;
t2p->tiff_photometric=PHOTOMETRIC_RGB;
}
}
if (TIFFGetField(input, TIFFTAG_TRANSFERFUNCTION,
&(t2p->tiff_transferfunction[0]),
&(t2p->tiff_transferfunction[1]),
&(t2p->tiff_transferfunction[2]))) {
if((t2p->tiff_transferfunction[1] != (float*) NULL) &&
(t2p->tiff_transferfunction[2] != (float*) NULL) &&
(t2p->tiff_transferfunction[1] !=
t2p->tiff_transferfunction[0])) {
t2p->tiff_transferfunctioncount=3;
} else {
t2p->tiff_transferfunctioncount=1;
}
} else {
t2p->tiff_transferfunctioncount=0;
}
if(TIFFGetField(input, TIFFTAG_WHITEPOINT, &xfloatp)!=0){
t2p->tiff_whitechromaticities[0]=xfloatp[0];
t2p->tiff_whitechromaticities[1]=xfloatp[1];
if(t2p->pdf_colorspace & T2P_CS_GRAY){
t2p->pdf_colorspace |= T2P_CS_CALGRAY;
}
if(t2p->pdf_colorspace & T2P_CS_RGB){
t2p->pdf_colorspace |= T2P_CS_CALRGB;
}
}
if(TIFFGetField(input, TIFFTAG_PRIMARYCHROMATICITIES, &xfloatp)!=0){
t2p->tiff_primarychromaticities[0]=xfloatp[0];
t2p->tiff_primarychromaticities[1]=xfloatp[1];
t2p->tiff_primarychromaticities[2]=xfloatp[2];
t2p->tiff_primarychromaticities[3]=xfloatp[3];
t2p->tiff_primarychromaticities[4]=xfloatp[4];
t2p->tiff_primarychromaticities[5]=xfloatp[5];
if(t2p->pdf_colorspace & T2P_CS_RGB){
t2p->pdf_colorspace |= T2P_CS_CALRGB;
}
}
if(t2p->pdf_colorspace & T2P_CS_LAB){
if(TIFFGetField(input, TIFFTAG_WHITEPOINT, &xfloatp) != 0){
t2p->tiff_whitechromaticities[0]=xfloatp[0];
t2p->tiff_whitechromaticities[1]=xfloatp[1];
} else {
t2p->tiff_whitechromaticities[0]=0.3457F; /* 0.3127F; */
t2p->tiff_whitechromaticities[1]=0.3585F; /* 0.3290F; */
}
}
if(TIFFGetField(input,
TIFFTAG_ICCPROFILE,
&(t2p->tiff_iccprofilelength),
&(t2p->tiff_iccprofile))!=0){
t2p->pdf_colorspace |= T2P_CS_ICCBASED;
} else {
t2p->tiff_iccprofilelength=0;
t2p->tiff_iccprofile=NULL;
}
#ifdef CCITT_SUPPORT
if( t2p->tiff_bitspersample==1 &&
t2p->tiff_samplesperpixel==1){
t2p->pdf_compression = T2P_COMPRESS_G4;
}
#endif
return;
}
/*
This function returns the necessary size of a data buffer to contain the raw or
uncompressed image data from the input TIFF for a page.
*/
void t2p_read_tiff_size(T2P* t2p, TIFF* input){
uint64* sbc=NULL;
#if defined(JPEG_SUPPORT) || defined (OJPEG_SUPPORT)
unsigned char* jpt=NULL;
tstrip_t i=0;
tstrip_t stripcount=0;
#endif
uint64 k = 0;
if(t2p->pdf_transcode == T2P_TRANSCODE_RAW){
#ifdef CCITT_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_G4 ){
TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc);
if (sbc[0] != (uint64)(tmsize_t)sbc[0]) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize=(tmsize_t)sbc[0];
return;
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_ZIP){
TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc);
if (sbc[0] != (uint64)(tmsize_t)sbc[0]) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize=(tmsize_t)sbc[0];
return;
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_OJPEG){
if(!TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc)){
TIFFError(TIFF2PDF_MODULE,
"Input file %s missing field: TIFFTAG_STRIPBYTECOUNTS",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
k = checkAdd64(k, sbc[i], t2p);
}
if(TIFFGetField(input, TIFFTAG_JPEGIFOFFSET, &(t2p->tiff_dataoffset))){
if(t2p->tiff_dataoffset != 0){
if(TIFFGetField(input, TIFFTAG_JPEGIFBYTECOUNT, &(t2p->tiff_datasize))!=0){
if((uint64)t2p->tiff_datasize < k) {
TIFFWarning(TIFF2PDF_MODULE,
"Input file %s has short JPEG interchange file byte count",
TIFFFileName(input));
t2p->pdf_ojpegiflength=t2p->tiff_datasize;
k = checkAdd64(k, t2p->tiff_datasize, t2p);
k = checkAdd64(k, 6, t2p);
k = checkAdd64(k, stripcount, t2p);
k = checkAdd64(k, stripcount, t2p);
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
return;
}else {
TIFFError(TIFF2PDF_MODULE,
"Input file %s missing field: TIFFTAG_JPEGIFBYTECOUNT",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
}
k = checkAdd64(k, stripcount, t2p);
k = checkAdd64(k, stripcount, t2p);
k = checkAdd64(k, 2048, t2p);
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_JPEG) {
uint32 count = 0;
if(TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0 ){
if(count > 4){
k += count;
k -= 2; /* don't use EOI of header */
}
} else {
k = 2; /* SOI for first strip */
}
stripcount=TIFFNumberOfStrips(input);
if(!TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc)){
TIFFError(TIFF2PDF_MODULE,
"Input file %s missing field: TIFFTAG_STRIPBYTECOUNTS",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<stripcount;i++){
k = checkAdd64(k, sbc[i], t2p);
k -=2; /* don't use EOI of strip */
k +=2; /* add space for restart marker */
}
k = checkAdd64(k, 2, t2p); /* use EOI of last strip */
k = checkAdd64(k, 6, t2p); /* for DRI marker of first strip */
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
#endif
(void) 0;
}
k = checkMultiply64(TIFFScanlineSize(input), t2p->tiff_length, t2p);
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
}
if (k == 0) {
/* Assume we had overflow inside TIFFScanlineSize */
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
/*
This function returns the necessary size of a data buffer to contain the raw or
uncompressed image data from the input TIFF for a tile of a page.
*/
void t2p_read_tiff_size_tile(T2P* t2p, TIFF* input, ttile_t tile){
uint64* tbc = NULL;
uint16 edge=0;
#ifdef JPEG_SUPPORT
unsigned char* jpt;
#endif
uint64 k;
edge |= t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
edge |= t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
if(t2p->pdf_transcode==T2P_TRANSCODE_RAW){
if(edge
#if defined(JPEG_SUPPORT) || defined(OJPEG_SUPPORT)
&& !(t2p->pdf_compression==T2P_COMPRESS_JPEG)
#endif
){
t2p->tiff_datasize=TIFFTileSize(input);
if (t2p->tiff_datasize == 0) {
/* Assume we had overflow inside TIFFTileSize */
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
} else {
TIFFGetField(input, TIFFTAG_TILEBYTECOUNTS, &tbc);
k=tbc[tile];
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
k = checkAdd64(k, 2048, t2p);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_JPEG) {
uint32 count = 0;
if(TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt)!=0){
if(count > 4){
k = checkAdd64(k, count, t2p);
k -= 2; /* don't use EOI of header or SOI of tile */
}
}
}
#endif
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
}
k = TIFFTileSize(input);
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
}
if (k == 0) {
/* Assume we had overflow inside TIFFTileSize */
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
/*
* This functions returns a non-zero value when the tile is on the right edge
* and does not have full imaged tile width.
*/
int t2p_tile_is_right_edge(T2P_TILES tiles, ttile_t tile){
if( ((tile+1) % tiles.tiles_tilecountx == 0)
&& (tiles.tiles_edgetilewidth != 0) ){
return(1);
} else {
return(0);
}
}
/*
* This functions returns a non-zero value when the tile is on the bottom edge
* and does not have full imaged tile length.
*/
int t2p_tile_is_bottom_edge(T2P_TILES tiles, ttile_t tile){
if( ((tile+1) > (tiles.tiles_tilecount-tiles.tiles_tilecountx) )
&& (tiles.tiles_edgetilelength != 0) ){
return(1);
} else {
return(0);
}
}
/*
* This function returns a non-zero value when the tile is a right edge tile
* or a bottom edge tile.
*/
int t2p_tile_is_edge(T2P_TILES tiles, ttile_t tile){
return(t2p_tile_is_right_edge(tiles, tile) | t2p_tile_is_bottom_edge(tiles, tile) );
}
/*
This function returns a non-zero value when the tile is a right edge tile and a bottom
edge tile.
*/
int t2p_tile_is_corner_edge(T2P_TILES tiles, ttile_t tile){
return(t2p_tile_is_right_edge(tiles, tile) & t2p_tile_is_bottom_edge(tiles, tile) );
}
/*
This function reads the raster image data from the input TIFF for an image and writes
the data to the output PDF XObject image dictionary stream. It returns the amount written
or zero on error.
*/
tsize_t t2p_readwrite_pdf_image(T2P* t2p, TIFF* input, TIFF* output){
tsize_t written=0;
unsigned char* buffer=NULL;
unsigned char* samplebuffer=NULL;
tsize_t bufferoffset=0;
tsize_t samplebufferoffset=0;
tsize_t read=0;
tstrip_t i=0;
tstrip_t j=0;
tstrip_t stripcount=0;
tsize_t stripsize=0;
tsize_t sepstripcount=0;
tsize_t sepstripsize=0;
#ifdef OJPEG_SUPPORT
toff_t inputoffset=0;
uint16 h_samp=1;
uint16 v_samp=1;
uint16 ri=1;
uint32 rows=0;
#endif /* ifdef OJPEG_SUPPORT */
#ifdef JPEG_SUPPORT
unsigned char* jpt;
float* xfloatp;
uint64* sbc;
unsigned char* stripbuffer;
tsize_t striplength=0;
uint32 max_striplength=0;
#endif /* ifdef JPEG_SUPPORT */
/* Fail if prior error (in particular, can't trust tiff_datasize) */
if (t2p->t2p_error != T2P_ERR_OK)
return(0);
if(t2p->pdf_transcode == T2P_TRANSCODE_RAW){
#ifdef CCITT_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_G4){
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if (buffer == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for "
"t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFReadRawStrip(input, 0, (tdata_t) buffer,
t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){
/*
* make sure is lsb-to-msb
* bit-endianness fill order
*/
TIFFReverseBits(buffer,
t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer,
t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif /* ifdef CCITT_SUPPORT */
#ifdef ZIP_SUPPORT
if (t2p->pdf_compression == T2P_COMPRESS_ZIP) {
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer == NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
TIFFReadRawStrip(input, 0, (tdata_t) buffer,
t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB) {
TIFFReverseBits(buffer,
t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer,
t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif /* ifdef ZIP_SUPPORT */
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_OJPEG) {
if(t2p->tiff_dataoffset != 0) {
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
if(t2p->pdf_ojpegiflength==0){
inputoffset=t2pSeekFile(input, 0,
SEEK_CUR);
t2pSeekFile(input,
t2p->tiff_dataoffset,
SEEK_SET);
t2pReadFile(input, (tdata_t) buffer,
t2p->tiff_datasize);
t2pSeekFile(input, inputoffset,
SEEK_SET);
t2pWriteFile(output, (tdata_t) buffer,
t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
} else {
inputoffset=t2pSeekFile(input, 0,
SEEK_CUR);
t2pSeekFile(input,
t2p->tiff_dataoffset,
SEEK_SET);
bufferoffset = t2pReadFile(input,
(tdata_t) buffer,
t2p->pdf_ojpegiflength);
t2p->pdf_ojpegiflength = 0;
t2pSeekFile(input, inputoffset,
SEEK_SET);
TIFFGetField(input,
TIFFTAG_YCBCRSUBSAMPLING,
&h_samp, &v_samp);
buffer[bufferoffset++]= 0xff;
buffer[bufferoffset++]= 0xdd;
buffer[bufferoffset++]= 0x00;
buffer[bufferoffset++]= 0x04;
h_samp*=8;
v_samp*=8;
ri=(t2p->tiff_width+h_samp-1) / h_samp;
TIFFGetField(input,
TIFFTAG_ROWSPERSTRIP,
&rows);
ri*=(rows+v_samp-1)/v_samp;
buffer[bufferoffset++]= (ri>>8) & 0xff;
buffer[bufferoffset++]= ri & 0xff;
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
if(i != 0 ){
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=(0xd0 | ((i-1)%8));
}
bufferoffset+=TIFFReadRawStrip(input,
i,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
}
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
}
} else {
if(! t2p->pdf_ojpegdata){
TIFFError(TIFF2PDF_MODULE,
"No support for OJPEG image %s with bad tables",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
_TIFFmemcpy(buffer, t2p->pdf_ojpegdata, t2p->pdf_ojpegdatalength);
bufferoffset=t2p->pdf_ojpegdatalength;
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
if(i != 0){
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=(0xd0 | ((i-1)%8));
}
bufferoffset+=TIFFReadRawStrip(input,
i,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
}
if( ! ( (buffer[bufferoffset-1]==0xd9) && (buffer[bufferoffset-2]==0xff) ) ){
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=0xd9;
}
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
#if 0
/*
This hunk of code removed code is clearly
mis-placed and we are not sure where it
should be (if anywhere)
*/
TIFFError(TIFF2PDF_MODULE,
"No support for OJPEG image %s with no JPEG File Interchange offset",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
#endif
}
}
#endif /* ifdef OJPEG_SUPPORT */
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_JPEG) {
uint32 count = 0;
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
if (TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0) {
if(count > 4) {
_TIFFmemcpy(buffer, jpt, count);
bufferoffset += count - 2;
}
}
stripcount=TIFFNumberOfStrips(input);
TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc);
for(i=0;i<stripcount;i++){
if(sbc[i]>max_striplength) max_striplength=sbc[i];
}
stripbuffer = (unsigned char*)
_TIFFmalloc(max_striplength);
if(stripbuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_readwrite_pdf_image, %s",
max_striplength,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
for(i=0;i<stripcount;i++){
striplength=TIFFReadRawStrip(input, i, (tdata_t) stripbuffer, -1);
if(!t2p_process_jpeg_strip(
stripbuffer,
&striplength,
buffer,
t2p->tiff_datasize,
&bufferoffset,
i,
t2p->tiff_length)){
TIFFError(TIFF2PDF_MODULE,
"Can't process JPEG data in input file %s",
TIFFFileName(input));
_TIFFfree(samplebuffer);
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
}
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=0xd9;
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(stripbuffer);
_TIFFfree(buffer);
return(bufferoffset);
}
#endif /* ifdef JPEG_SUPPORT */
(void)0;
}
if(t2p->pdf_sample==T2P_SAMPLE_NOTHING){
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
stripsize=TIFFStripSize(input);
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
read =
TIFFReadEncodedStrip(input,
i,
(tdata_t) &buffer[bufferoffset],
TIFFmin(stripsize, t2p->tiff_datasize - bufferoffset));
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding strip %u of %s",
i,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
bufferoffset+=read;
}
} else {
if(t2p->pdf_sample & T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG){
sepstripsize=TIFFStripSize(input);
sepstripcount=TIFFNumberOfStrips(input);
stripsize=sepstripsize*t2p->tiff_samplesperpixel;
stripcount=sepstripcount/t2p->tiff_samplesperpixel;
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
samplebuffer = (unsigned char*) _TIFFmalloc(stripsize);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
_TIFFfree(buffer);
return(0);
}
for(i=0;i<stripcount;i++){
samplebufferoffset=0;
for(j=0;j<t2p->tiff_samplesperpixel;j++){
read =
TIFFReadEncodedStrip(input,
i + j*stripcount,
(tdata_t) &(samplebuffer[samplebufferoffset]),
TIFFmin(sepstripsize, stripsize - samplebufferoffset));
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding strip %u of %s",
i + j*stripcount,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
samplebufferoffset+=read;
}
t2p_sample_planar_separate_to_contig(
t2p,
&(buffer[bufferoffset]),
samplebuffer,
samplebufferoffset);
bufferoffset+=samplebufferoffset;
}
_TIFFfree(samplebuffer);
goto dataready;
}
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
stripsize=TIFFStripSize(input);
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
read =
TIFFReadEncodedStrip(input,
i,
(tdata_t) &buffer[bufferoffset],
TIFFmin(stripsize, t2p->tiff_datasize - bufferoffset));
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding strip %u of %s",
i,
TIFFFileName(input));
_TIFFfree(samplebuffer);
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
bufferoffset+=read;
}
if(t2p->pdf_sample & T2P_SAMPLE_REALIZE_PALETTE){
// FIXME: overflow?
samplebuffer=(unsigned char*)_TIFFrealloc(
(tdata_t) buffer,
t2p->tiff_datasize * t2p->tiff_samplesperpixel);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
_TIFFfree(buffer);
return(0);
} else {
buffer=samplebuffer;
t2p->tiff_datasize *= t2p->tiff_samplesperpixel;
}
t2p_sample_realize_palette(t2p, buffer);
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgba_to_rgb(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length);
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBAA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgbaa_to_rgb(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length);
}
if(t2p->pdf_sample & T2P_SAMPLE_YCBCR_TO_RGB){
samplebuffer=(unsigned char*)_TIFFrealloc(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length*4);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
_TIFFfree(buffer);
return(0);
} else {
buffer=samplebuffer;
}
if(!TIFFReadRGBAImageOriented(
input,
t2p->tiff_width,
t2p->tiff_length,
(uint32*)buffer,
ORIENTATION_TOPLEFT,
0)){
TIFFError(TIFF2PDF_MODULE,
"Can't use TIFFReadRGBAImageOriented to extract RGB image from %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
t2p->tiff_datasize=t2p_sample_abgr_to_rgb(
(tdata_t) buffer,
t2p->tiff_width*t2p->tiff_length);
}
if(t2p->pdf_sample & T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED){
t2p->tiff_datasize=t2p_sample_lab_signed_to_unsigned(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length);
}
}
dataready:
t2p_disable(output);
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, t2p->tiff_photometric);
TIFFSetField(output, TIFFTAG_BITSPERSAMPLE, t2p->tiff_bitspersample);
TIFFSetField(output, TIFFTAG_SAMPLESPERPIXEL, t2p->tiff_samplesperpixel);
TIFFSetField(output, TIFFTAG_IMAGEWIDTH, t2p->tiff_width);
TIFFSetField(output, TIFFTAG_IMAGELENGTH, t2p->tiff_length);
TIFFSetField(output, TIFFTAG_ROWSPERSTRIP, t2p->tiff_length);
TIFFSetField(output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(output, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
switch(t2p->pdf_compression){
case T2P_COMPRESS_NONE:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
break;
#ifdef CCITT_SUPPORT
case T2P_COMPRESS_G4:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_CCITTFAX4);
break;
#endif /* ifdef CCITT_SUPPORT */
#ifdef JPEG_SUPPORT
case T2P_COMPRESS_JPEG:
if(t2p->tiff_photometric==PHOTOMETRIC_YCBCR) {
uint16 hor = 0, ver = 0;
if (TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &hor, &ver) !=0 ) {
if(hor != 0 && ver != 0){
TIFFSetField(output, TIFFTAG_YCBCRSUBSAMPLING, hor, ver);
}
}
if(TIFFGetField(input, TIFFTAG_REFERENCEBLACKWHITE, &xfloatp)!=0){
TIFFSetField(output, TIFFTAG_REFERENCEBLACKWHITE, xfloatp);
}
}
if(TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_JPEG)==0){
TIFFError(TIFF2PDF_MODULE,
"Unable to use JPEG compression for input %s and output %s",
TIFFFileName(input),
TIFFFileName(output));
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFSetField(output, TIFFTAG_JPEGTABLESMODE, 0);
if(t2p->pdf_colorspace & (T2P_CS_RGB | T2P_CS_LAB)){
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR){
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
} else {
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RAW);
}
}
if(t2p->pdf_colorspace & T2P_CS_GRAY){
(void)0;
}
if(t2p->pdf_colorspace & T2P_CS_CMYK){
(void)0;
}
if(t2p->pdf_defaultcompressionquality != 0){
TIFFSetField(output,
TIFFTAG_JPEGQUALITY,
t2p->pdf_defaultcompressionquality);
}
break;
#endif /* ifdef JPEG_SUPPORT */
#ifdef ZIP_SUPPORT
case T2P_COMPRESS_ZIP:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
if(t2p->pdf_defaultcompressionquality%100 != 0){
TIFFSetField(output,
TIFFTAG_PREDICTOR,
t2p->pdf_defaultcompressionquality % 100);
}
if(t2p->pdf_defaultcompressionquality/100 != 0){
TIFFSetField(output,
TIFFTAG_ZIPQUALITY,
(t2p->pdf_defaultcompressionquality / 100));
}
break;
#endif /* ifdef ZIP_SUPPORT */
default:
break;
}
t2p_enable(output);
t2p->outputwritten = 0;
#ifdef JPEG_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_JPEG
&& t2p->tiff_photometric == PHOTOMETRIC_YCBCR){
bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t)0,
buffer,
stripsize * stripcount);
} else
#endif /* ifdef JPEG_SUPPORT */
{
bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t)0,
buffer,
t2p->tiff_datasize);
}
if (buffer != NULL) {
_TIFFfree(buffer);
buffer=NULL;
}
if (bufferoffset == (tsize_t)-1) {
TIFFError(TIFF2PDF_MODULE,
"Error writing encoded strip to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
written = t2p->outputwritten;
return(written);
}
/*
* This function reads the raster image data from the input TIFF for an image
* tile and writes the data to the output PDF XObject image dictionary stream
* for the tile. It returns the amount written or zero on error.
*/
tsize_t t2p_readwrite_pdf_image_tile(T2P* t2p, TIFF* input, TIFF* output, ttile_t tile){
uint16 edge=0;
tsize_t written=0;
unsigned char* buffer=NULL;
tsize_t bufferoffset=0;
unsigned char* samplebuffer=NULL;
tsize_t samplebufferoffset=0;
tsize_t read=0;
uint16 i=0;
ttile_t tilecount=0;
/* tsize_t tilesize=0; */
ttile_t septilecount=0;
tsize_t septilesize=0;
#ifdef JPEG_SUPPORT
unsigned char* jpt;
float* xfloatp;
uint32 xuint32=0;
#endif
/* Fail if prior error (in particular, can't trust tiff_datasize) */
if (t2p->t2p_error != T2P_ERR_OK)
return(0);
edge |= t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
edge |= t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
if( (t2p->pdf_transcode == T2P_TRANSCODE_RAW) && ((edge == 0)
#if defined(JPEG_SUPPORT) || defined(OJPEG_SUPPORT)
|| (t2p->pdf_compression == T2P_COMPRESS_JPEG)
#endif
)
){
#ifdef CCITT_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_G4){
buffer= (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFReadRawTile(input, tile, (tdata_t) buffer, t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){
TIFFReverseBits(buffer, t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_ZIP){
buffer= (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFReadRawTile(input, tile, (tdata_t) buffer, t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){
TIFFReverseBits(buffer, t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_OJPEG){
if(! t2p->pdf_ojpegdata){
TIFFError(TIFF2PDF_MODULE,
"No support for OJPEG image %s with "
"bad tables",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
buffer=(unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
_TIFFmemcpy(buffer, t2p->pdf_ojpegdata, t2p->pdf_ojpegdatalength);
if(edge!=0){
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile)){
buffer[7]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength >> 8) & 0xff;
buffer[8]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength ) & 0xff;
}
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile)){
buffer[9]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth >> 8) & 0xff;
buffer[10]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth ) & 0xff;
}
}
bufferoffset=t2p->pdf_ojpegdatalength;
bufferoffset+=TIFFReadRawTile(input,
tile,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
((unsigned char*)buffer)[bufferoffset++]=0xff;
((unsigned char*)buffer)[bufferoffset++]=0xd9;
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_JPEG){
unsigned char table_end[2];
uint32 count = 0;
buffer= (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(TIFF_SIZE_T) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0) {
if (count >= 4) {
int retTIFFReadRawTile;
/* Ignore EOI marker of JpegTables */
_TIFFmemcpy(buffer, jpt, count - 2);
bufferoffset += count - 2;
/* Store last 2 bytes of the JpegTables */
table_end[0] = buffer[bufferoffset-2];
table_end[1] = buffer[bufferoffset-1];
xuint32 = bufferoffset;
bufferoffset -= 2;
retTIFFReadRawTile= TIFFReadRawTile(
input,
tile,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
if( retTIFFReadRawTile < 0 )
{
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
bufferoffset += retTIFFReadRawTile;
/* Overwrite SOI marker of image scan with previously */
/* saved end of JpegTables */
buffer[xuint32-2]=table_end[0];
buffer[xuint32-1]=table_end[1];
}
}
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
}
#endif
(void)0;
}
if(t2p->pdf_sample==T2P_SAMPLE_NOTHING){
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for "
"t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
read = TIFFReadEncodedTile(
input,
tile,
(tdata_t) &buffer[bufferoffset],
t2p->tiff_datasize);
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding tile %u of %s",
tile,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
} else {
if(t2p->pdf_sample == T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG){
septilesize=TIFFTileSize(input);
septilecount=TIFFNumberOfTiles(input);
/* tilesize=septilesize*t2p->tiff_samplesperpixel; */
tilecount=septilecount/t2p->tiff_samplesperpixel;
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
samplebuffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
samplebufferoffset=0;
for(i=0;i<t2p->tiff_samplesperpixel;i++){
read =
TIFFReadEncodedTile(input,
tile + i*tilecount,
(tdata_t) &(samplebuffer[samplebufferoffset]),
septilesize);
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding tile %u of %s",
tile + i*tilecount,
TIFFFileName(input));
_TIFFfree(samplebuffer);
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
samplebufferoffset+=read;
}
t2p_sample_planar_separate_to_contig(
t2p,
&(buffer[bufferoffset]),
samplebuffer,
samplebufferoffset);
bufferoffset+=samplebufferoffset;
_TIFFfree(samplebuffer);
}
if(buffer==NULL){
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
read = TIFFReadEncodedTile(
input,
tile,
(tdata_t) &buffer[bufferoffset],
t2p->tiff_datasize);
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding tile %u of %s",
tile,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgba_to_rgb(
(tdata_t)buffer,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth
*t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBAA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgbaa_to_rgb(
(tdata_t)buffer,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth
*t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
if(t2p->pdf_sample & T2P_SAMPLE_YCBCR_TO_RGB){
TIFFError(TIFF2PDF_MODULE,
"No support for YCbCr to RGB in tile for %s",
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(t2p->pdf_sample & T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED){
t2p->tiff_datasize=t2p_sample_lab_signed_to_unsigned(
(tdata_t)buffer,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth
*t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
}
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile) != 0){
t2p_tile_collapse_left(
buffer,
TIFFTileRowSize(input),
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
t2p_disable(output);
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, t2p->tiff_photometric);
TIFFSetField(output, TIFFTAG_BITSPERSAMPLE, t2p->tiff_bitspersample);
TIFFSetField(output, TIFFTAG_SAMPLESPERPIXEL, t2p->tiff_samplesperpixel);
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile) == 0){
TIFFSetField(
output,
TIFFTAG_IMAGEWIDTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
} else {
TIFFSetField(
output,
TIFFTAG_IMAGEWIDTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
}
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile) == 0){
TIFFSetField(
output,
TIFFTAG_IMAGELENGTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
TIFFSetField(
output,
TIFFTAG_ROWSPERSTRIP,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
} else {
TIFFSetField(
output,
TIFFTAG_IMAGELENGTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
TIFFSetField(
output,
TIFFTAG_ROWSPERSTRIP,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
}
TIFFSetField(output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(output, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
switch(t2p->pdf_compression){
case T2P_COMPRESS_NONE:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
break;
#ifdef CCITT_SUPPORT
case T2P_COMPRESS_G4:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_CCITTFAX4);
break;
#endif
#ifdef JPEG_SUPPORT
case T2P_COMPRESS_JPEG:
if (t2p->tiff_photometric==PHOTOMETRIC_YCBCR) {
uint16 hor = 0, ver = 0;
if (TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &hor, &ver)!=0) {
if (hor != 0 && ver != 0) {
TIFFSetField(output, TIFFTAG_YCBCRSUBSAMPLING, hor, ver);
}
}
if(TIFFGetField(input, TIFFTAG_REFERENCEBLACKWHITE, &xfloatp)!=0){
TIFFSetField(output, TIFFTAG_REFERENCEBLACKWHITE, xfloatp);
}
}
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_JPEG);
TIFFSetField(output, TIFFTAG_JPEGTABLESMODE, 0); /* JPEGTABLESMODE_NONE */
if(t2p->pdf_colorspace & (T2P_CS_RGB | T2P_CS_LAB)){
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR){
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
} else {
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RAW);
}
}
if(t2p->pdf_colorspace & T2P_CS_GRAY){
(void)0;
}
if(t2p->pdf_colorspace & T2P_CS_CMYK){
(void)0;
}
if(t2p->pdf_defaultcompressionquality != 0){
TIFFSetField(output,
TIFFTAG_JPEGQUALITY,
t2p->pdf_defaultcompressionquality);
}
break;
#endif
#ifdef ZIP_SUPPORT
case T2P_COMPRESS_ZIP:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
if(t2p->pdf_defaultcompressionquality%100 != 0){
TIFFSetField(output,
TIFFTAG_PREDICTOR,
t2p->pdf_defaultcompressionquality % 100);
}
if(t2p->pdf_defaultcompressionquality/100 != 0){
TIFFSetField(output,
TIFFTAG_ZIPQUALITY,
(t2p->pdf_defaultcompressionquality / 100));
}
break;
#endif
default:
break;
}
t2p_enable(output);
t2p->outputwritten = 0;
bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t) 0, buffer,
TIFFStripSize(output));
if (buffer != NULL) {
_TIFFfree(buffer);
buffer = NULL;
}
if (bufferoffset == -1) {
TIFFError(TIFF2PDF_MODULE,
"Error writing encoded tile to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
written = t2p->outputwritten;
return(written);
}
#ifdef OJPEG_SUPPORT
int t2p_process_ojpeg_tables(T2P* t2p, TIFF* input){
uint16 proc=0;
void* q;
uint32 q_length=0;
void* dc;
uint32 dc_length=0;
void* ac;
uint32 ac_length=0;
uint16* lp;
uint16* pt;
uint16 h_samp=1;
uint16 v_samp=1;
unsigned char* ojpegdata;
uint16 table_count;
uint32 offset_table;
uint32 offset_ms_l;
uint32 code_count;
uint32 i=0;
uint32 dest=0;
uint16 ri=0;
uint32 rows=0;
if(!TIFFGetField(input, TIFFTAG_JPEGPROC, &proc)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGProc field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(proc!=JPEGPROC_BASELINE && proc!=JPEGPROC_LOSSLESS){
TIFFError(TIFF2PDF_MODULE,
"Bad JPEGProc field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(!TIFFGetField(input, TIFFTAG_JPEGQTABLES, &q_length, &q)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGQTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(q_length < (64U * t2p->tiff_samplesperpixel)){
TIFFError(TIFF2PDF_MODULE,
"Bad JPEGQTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(!TIFFGetField(input, TIFFTAG_JPEGDCTABLES, &dc_length, &dc)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGDCTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(proc==JPEGPROC_BASELINE){
if(!TIFFGetField(input, TIFFTAG_JPEGACTABLES, &ac_length, &ac)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGACTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
} else {
if(!TIFFGetField(input, TIFFTAG_JPEGLOSSLESSPREDICTORS, &lp)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGLosslessPredictors field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(!TIFFGetField(input, TIFFTAG_JPEGPOINTTRANSFORM, &pt)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGPointTransform field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
}
if(!TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &h_samp, &v_samp)){
h_samp=1;
v_samp=1;
}
if(t2p->pdf_ojpegdata != NULL){
_TIFFfree(t2p->pdf_ojpegdata);
t2p->pdf_ojpegdata=NULL;
}
t2p->pdf_ojpegdata = _TIFFmalloc(2048);
if(t2p->pdf_ojpegdata == NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_process_ojpeg_tables, %s",
2048,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
_TIFFmemset(t2p->pdf_ojpegdata, 0x00, 2048);
t2p->pdf_ojpegdatalength = 0;
table_count=t2p->tiff_samplesperpixel;
if(proc==JPEGPROC_BASELINE){
if(table_count>2) table_count=2;
}
ojpegdata=(unsigned char*)t2p->pdf_ojpegdata;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xd8;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
if(proc==JPEGPROC_BASELINE){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc0;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc3;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=(8 + 3*t2p->tiff_samplesperpixel);
ojpegdata[t2p->pdf_ojpegdatalength++]=(t2p->tiff_bitspersample & 0xff);
if(TIFFIsTiled(input)){
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength ) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth ) & 0xff;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_length >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_length ) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_width >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_width ) & 0xff;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=(t2p->tiff_samplesperpixel & 0xff);
for(i=0;i<t2p->tiff_samplesperpixel;i++){
ojpegdata[t2p->pdf_ojpegdatalength++]=i;
if(i==0){
ojpegdata[t2p->pdf_ojpegdatalength] |= h_samp<<4 & 0xf0;;
ojpegdata[t2p->pdf_ojpegdatalength++] |= v_samp & 0x0f;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]= 0x11;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=i;
}
for(dest=0;dest<t2p->tiff_samplesperpixel;dest++){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xdb;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x43;
ojpegdata[t2p->pdf_ojpegdatalength++]=dest;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength++]),
&(((unsigned char*)q)[64*dest]), 64);
t2p->pdf_ojpegdatalength+=64;
}
offset_table=0;
for(dest=0;dest<table_count;dest++){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc4;
offset_ms_l=t2p->pdf_ojpegdatalength;
t2p->pdf_ojpegdatalength+=2;
ojpegdata[t2p->pdf_ojpegdatalength++]=dest & 0x0f;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)dc)[offset_table]), 16);
code_count=0;
offset_table+=16;
for(i=0;i<16;i++){
code_count+=ojpegdata[t2p->pdf_ojpegdatalength++];
}
ojpegdata[offset_ms_l]=((19+code_count)>>8) & 0xff;
ojpegdata[offset_ms_l+1]=(19+code_count) & 0xff;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)dc)[offset_table]), code_count);
offset_table+=code_count;
t2p->pdf_ojpegdatalength+=code_count;
}
if(proc==JPEGPROC_BASELINE){
offset_table=0;
for(dest=0;dest<table_count;dest++){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc4;
offset_ms_l=t2p->pdf_ojpegdatalength;
t2p->pdf_ojpegdatalength+=2;
ojpegdata[t2p->pdf_ojpegdatalength] |= 0x10;
ojpegdata[t2p->pdf_ojpegdatalength++] |=dest & 0x0f;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)ac)[offset_table]), 16);
code_count=0;
offset_table+=16;
for(i=0;i<16;i++){
code_count+=ojpegdata[t2p->pdf_ojpegdatalength++];
}
ojpegdata[offset_ms_l]=((19+code_count)>>8) & 0xff;
ojpegdata[offset_ms_l+1]=(19+code_count) & 0xff;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)ac)[offset_table]), code_count);
offset_table+=code_count;
t2p->pdf_ojpegdatalength+=code_count;
}
}
if(TIFFNumberOfStrips(input)>1){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xdd;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x04;
h_samp*=8;
v_samp*=8;
ri=(t2p->tiff_width+h_samp-1) / h_samp;
TIFFGetField(input, TIFFTAG_ROWSPERSTRIP, &rows);
ri*=(rows+v_samp-1)/v_samp;
ojpegdata[t2p->pdf_ojpegdatalength++]= (ri>>8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]= ri & 0xff;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xda;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=(6 + 2*t2p->tiff_samplesperpixel);
ojpegdata[t2p->pdf_ojpegdatalength++]=t2p->tiff_samplesperpixel & 0xff;
for(i=0;i<t2p->tiff_samplesperpixel;i++){
ojpegdata[t2p->pdf_ojpegdatalength++]= i & 0xff;
if(proc==JPEGPROC_BASELINE){
ojpegdata[t2p->pdf_ojpegdatalength] |=
( ( (i>(table_count-1U)) ? (table_count-1U) : i) << 4U) & 0xf0;
ojpegdata[t2p->pdf_ojpegdatalength++] |=
( (i>(table_count-1U)) ? (table_count-1U) : i) & 0x0f;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++] = (i << 4) & 0xf0;
}
}
if(proc==JPEGPROC_BASELINE){
t2p->pdf_ojpegdatalength++;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x3f;
t2p->pdf_ojpegdatalength++;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]= (lp[0] & 0xff);
t2p->pdf_ojpegdatalength++;
ojpegdata[t2p->pdf_ojpegdatalength++]= (pt[0] & 0x0f);
}
return(1);
}
#endif
#ifdef JPEG_SUPPORT
int t2p_process_jpeg_strip(
unsigned char* strip,
tsize_t* striplength,
unsigned char* buffer,
tsize_t buffersize,
tsize_t* bufferoffset,
tstrip_t no,
uint32 height){
tsize_t i=0;
while (i < *striplength) {
tsize_t datalen;
uint16 ri;
uint16 v_samp;
uint16 h_samp;
int j;
int ncomp;
/* marker header: one or more FFs */
if (strip[i] != 0xff)
return(0);
i++;
while (i < *striplength && strip[i] == 0xff)
i++;
if (i >= *striplength)
return(0);
/* SOI is the only pre-SOS marker without a length word */
if (strip[i] == 0xd8)
datalen = 0;
else {
if ((*striplength - i) <= 2)
return(0);
datalen = (strip[i+1] << 8) | strip[i+2];
if (datalen < 2 || datalen >= (*striplength - i))
return(0);
}
switch( strip[i] ){
case 0xd8: /* SOI - start of image */
if( *bufferoffset + 2 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), 2);
*bufferoffset+=2;
break;
case 0xc0: /* SOF0 */
case 0xc1: /* SOF1 */
case 0xc3: /* SOF3 */
case 0xc9: /* SOF9 */
case 0xca: /* SOF10 */
if(no==0){
if( *bufferoffset + datalen + 2 + 6 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2);
if( *bufferoffset + 9 >= buffersize )
return(0);
ncomp = buffer[*bufferoffset+9];
if (ncomp < 1 || ncomp > 4)
return(0);
v_samp=1;
h_samp=1;
if( *bufferoffset + 11 + 3*(ncomp-1) >= buffersize )
return(0);
for(j=0;j<ncomp;j++){
uint16 samp = buffer[*bufferoffset+11+(3*j)];
if( (samp>>4) > h_samp)
h_samp = (samp>>4);
if( (samp & 0x0f) > v_samp)
v_samp = (samp & 0x0f);
}
v_samp*=8;
h_samp*=8;
ri=((( ((uint16)(buffer[*bufferoffset+5])<<8) |
(uint16)(buffer[*bufferoffset+6]) )+v_samp-1)/
v_samp);
ri*=((( ((uint16)(buffer[*bufferoffset+7])<<8) |
(uint16)(buffer[*bufferoffset+8]) )+h_samp-1)/
h_samp);
buffer[*bufferoffset+5]=
(unsigned char) ((height>>8) & 0xff);
buffer[*bufferoffset+6]=
(unsigned char) (height & 0xff);
*bufferoffset+=datalen+2;
/* insert a DRI marker */
buffer[(*bufferoffset)++]=0xff;
buffer[(*bufferoffset)++]=0xdd;
buffer[(*bufferoffset)++]=0x00;
buffer[(*bufferoffset)++]=0x04;
buffer[(*bufferoffset)++]=(ri >> 8) & 0xff;
buffer[(*bufferoffset)++]= ri & 0xff;
}
break;
case 0xc4: /* DHT */
case 0xdb: /* DQT */
if( *bufferoffset + datalen + 2 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2);
*bufferoffset+=datalen+2;
break;
case 0xda: /* SOS */
if(no==0){
if( *bufferoffset + datalen + 2 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2);
*bufferoffset+=datalen+2;
} else {
if( *bufferoffset + 2 > buffersize )
return(0);
buffer[(*bufferoffset)++]=0xff;
buffer[(*bufferoffset)++]=
(unsigned char)(0xd0 | ((no-1)%8));
}
i += datalen + 1;
/* copy remainder of strip */
if( *bufferoffset + *striplength - i > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i]), *striplength - i);
*bufferoffset+= *striplength - i;
return(1);
default:
/* ignore any other marker */
break;
}
i += datalen + 1;
}
/* failed to find SOS marker */
return(0);
}
#endif
/*
This functions converts a tilewidth x tilelength buffer of samples into an edgetilewidth x
tilelength buffer of samples.
*/
void t2p_tile_collapse_left(
tdata_t buffer,
tsize_t scanwidth,
uint32 tilewidth,
uint32 edgetilewidth,
uint32 tilelength){
uint32 i;
tsize_t edgescanwidth=0;
edgescanwidth = (scanwidth * edgetilewidth + (tilewidth - 1))/ tilewidth;
for(i=0;i<tilelength;i++){
/* We use memmove() since there can be overlaps in src and dst buffers for the first items */
memmove(
&(((char*)buffer)[edgescanwidth*i]),
&(((char*)buffer)[scanwidth*i]),
edgescanwidth);
}
return;
}
/*
* This function calls TIFFWriteDirectory on the output after blanking its
* output by replacing the read, write, and seek procedures with empty
* implementations, then it replaces the original implementations.
*/
void
t2p_write_advance_directory(T2P* t2p, TIFF* output)
{
t2p_disable(output);
if(!TIFFWriteDirectory(output)){
TIFFError(TIFF2PDF_MODULE,
"Error writing virtual directory to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p_enable(output);
return;
}
tsize_t t2p_sample_planar_separate_to_contig(
T2P* t2p,
unsigned char* buffer,
unsigned char* samplebuffer,
tsize_t samplebuffersize){
tsize_t stride=0;
tsize_t i=0;
tsize_t j=0;
stride=samplebuffersize/t2p->tiff_samplesperpixel;
for(i=0;i<stride;i++){
for(j=0;j<t2p->tiff_samplesperpixel;j++){
buffer[i*t2p->tiff_samplesperpixel + j] = samplebuffer[i + j*stride];
}
}
return(samplebuffersize);
}
tsize_t t2p_sample_realize_palette(T2P* t2p, unsigned char* buffer){
uint32 sample_count=0;
uint16 component_count=0;
uint32 palette_offset=0;
uint32 sample_offset=0;
uint32 i=0;
uint32 j=0;
sample_count=t2p->tiff_width*t2p->tiff_length;
component_count=t2p->tiff_samplesperpixel;
if( sample_count * component_count > t2p->tiff_datasize )
{
TIFFError(TIFF2PDF_MODULE, "Error: sample_count * component_count > t2p->tiff_datasize");
t2p->t2p_error = T2P_ERR_ERROR;
return 1;
}
for(i=sample_count;i>0;i--){
palette_offset=buffer[i-1] * component_count;
sample_offset= (i-1) * component_count;
for(j=0;j<component_count;j++){
buffer[sample_offset+j]=t2p->pdf_palette[palette_offset+j];
}
}
return(0);
}
/*
This functions converts in place a buffer of ABGR interleaved data
into RGB interleaved data, discarding A.
*/
tsize_t t2p_sample_abgr_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i=0;
uint32 sample=0;
for(i=0;i<samplecount;i++){
sample=((uint32*)data)[i];
((char*)data)[i*3]= (char) (sample & 0xff);
((char*)data)[i*3+1]= (char) ((sample>>8) & 0xff);
((char*)data)[i*3+2]= (char) ((sample>>16) & 0xff);
}
return(i*3);
}
/*
* This functions converts in place a buffer of RGBA interleaved data
* into RGB interleaved data, discarding A.
*/
tsize_t
t2p_sample_rgbaa_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i;
/* For the 3 first samples, there is overlapping between souce and
destination, so use memmove().
See http://bugzilla.maptools.org/show_bug.cgi?id=2577 */
for(i = 0; i < 3 && i < samplecount; i++)
memmove((uint8*)data + i * 3, (uint8*)data + i * 4, 3);
for(; i < samplecount; i++)
memcpy((uint8*)data + i * 3, (uint8*)data + i * 4, 3);
return(i * 3);
}
/*
* This functions converts in place a buffer of RGBA interleaved data
* into RGB interleaved data, adding 255-A to each component sample.
*/
tsize_t
t2p_sample_rgba_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i = 0;
uint32 sample = 0;
uint8 alpha = 0;
for (i = 0; i < samplecount; i++) {
sample=((uint32*)data)[i];
alpha=(uint8)((255 - ((sample >> 24) & 0xff)));
((uint8 *)data)[i * 3] = (uint8) ((sample >> 16) & 0xff) + alpha;
((uint8 *)data)[i * 3 + 1] = (uint8) ((sample >> 8) & 0xff) + alpha;
((uint8 *)data)[i * 3 + 2] = (uint8) (sample & 0xff) + alpha;
}
return (i * 3);
}
/*
This function converts the a and b samples of Lab data from signed
to unsigned.
*/
tsize_t t2p_sample_lab_signed_to_unsigned(tdata_t buffer, uint32 samplecount){
uint32 i=0;
for(i=0;i<samplecount;i++){
if( (((unsigned char*)buffer)[(i*3)+1] & 0x80) !=0){
((unsigned char*)buffer)[(i*3)+1] =
(unsigned char)(0x80 + ((char*)buffer)[(i*3)+1]);
} else {
((unsigned char*)buffer)[(i*3)+1] |= 0x80;
}
if( (((unsigned char*)buffer)[(i*3)+2] & 0x80) !=0){
((unsigned char*)buffer)[(i*3)+2] =
(unsigned char)(0x80 + ((char*)buffer)[(i*3)+2]);
} else {
((unsigned char*)buffer)[(i*3)+2] |= 0x80;
}
}
return(samplecount*3);
}
/*
This function writes the PDF header to output.
*/
tsize_t t2p_write_pdf_header(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[16];
int buflen=0;
buflen = snprintf(buffer, sizeof(buffer), "%%PDF-%u.%u ",
t2p->pdf_majorversion&0xff,
t2p->pdf_minorversion&0xff);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t)"\n%\342\343\317\323\n", 7);
return(written);
}
/*
This function writes the beginning of a PDF object to output.
*/
tsize_t t2p_write_pdf_obj_start(uint32 number, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)number);
check_snprintf_ret((T2P*)NULL, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen );
written += t2pWriteFile(output, (tdata_t) " 0 obj\n", 7);
return(written);
}
/*
This function writes the end of a PDF object to output.
*/
tsize_t t2p_write_pdf_obj_end(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "endobj\n", 7);
return(written);
}
/*
This function writes a PDF name object to output.
*/
tsize_t t2p_write_pdf_name(unsigned char* name, TIFF* output){
tsize_t written=0;
uint32 i=0;
char buffer[64];
uint16 nextchar=0;
size_t namelen=0;
namelen = strlen((char *)name);
if (namelen>126) {
namelen=126;
}
written += t2pWriteFile(output, (tdata_t) "/", 1);
for (i=0;i<namelen;i++){
if ( ((unsigned char)name[i]) < 0x21){
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
nextchar=1;
}
if ( ((unsigned char)name[i]) > 0x7E){
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
nextchar=1;
}
if (nextchar==0){
switch (name[i]){
case 0x23:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x25:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x28:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x29:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x2F:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x3C:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x3E:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x5B:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x5D:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x7B:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
case 0x7D:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
default:
written += t2pWriteFile(output, (tdata_t) &name[i], 1);
}
}
nextchar=0;
}
written += t2pWriteFile(output, (tdata_t) " ", 1);
return(written);
}
/*
* This function writes a PDF string object to output.
*/
tsize_t t2p_write_pdf_string(char* pdfstr, TIFF* output)
{
tsize_t written = 0;
uint32 i = 0;
char buffer[64];
size_t len = 0;
len = strlen(pdfstr);
written += t2pWriteFile(output, (tdata_t) "(", 1);
for (i=0; i<len; i++) {
if((pdfstr[i]&0x80) || (pdfstr[i]==127) || (pdfstr[i]<32)){
snprintf(buffer, sizeof(buffer), "\\%.3o", ((unsigned char)pdfstr[i]));
written += t2pWriteFile(output, (tdata_t)buffer, 4);
} else {
switch (pdfstr[i]){
case 0x08:
written += t2pWriteFile(output, (tdata_t) "\\b", 2);
break;
case 0x09:
written += t2pWriteFile(output, (tdata_t) "\\t", 2);
break;
case 0x0A:
written += t2pWriteFile(output, (tdata_t) "\\n", 2);
break;
case 0x0C:
written += t2pWriteFile(output, (tdata_t) "\\f", 2);
break;
case 0x0D:
written += t2pWriteFile(output, (tdata_t) "\\r", 2);
break;
case 0x28:
written += t2pWriteFile(output, (tdata_t) "\\(", 2);
break;
case 0x29:
written += t2pWriteFile(output, (tdata_t) "\\)", 2);
break;
case 0x5C:
written += t2pWriteFile(output, (tdata_t) "\\\\", 2);
break;
default:
written += t2pWriteFile(output, (tdata_t) &pdfstr[i], 1);
}
}
}
written += t2pWriteFile(output, (tdata_t) ") ", 1);
return(written);
}
/*
This function writes a buffer of data to output.
*/
tsize_t t2p_write_pdf_stream(tdata_t buffer, tsize_t len, TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) buffer, len);
return(written);
}
/*
This functions writes the beginning of a PDF stream to output.
*/
tsize_t t2p_write_pdf_stream_start(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "stream\n", 7);
return(written);
}
/*
This function writes the end of a PDF stream to output.
*/
tsize_t t2p_write_pdf_stream_end(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "\nendstream\n", 11);
return(written);
}
/*
This function writes a stream dictionary for a PDF stream to output.
*/
tsize_t t2p_write_pdf_stream_dict(tsize_t len, uint32 number, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "/Length ", 8);
if(len!=0){
written += t2p_write_pdf_stream_length(len, output);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)number);
check_snprintf_ret((T2P*)NULL, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
}
return(written);
}
/*
This functions writes the beginning of a PDF stream dictionary to output.
*/
tsize_t t2p_write_pdf_stream_dict_start(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "<< \n", 4);
return(written);
}
/*
This function writes the end of a PDF stream dictionary to output.
*/
tsize_t t2p_write_pdf_stream_dict_end(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) " >>\n", 4);
return(written);
}
/*
This function writes a number to output.
*/
tsize_t t2p_write_pdf_stream_length(tsize_t len, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)len);
check_snprintf_ret((T2P*)NULL, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
return(written);
}
/*
* This function writes the PDF Catalog structure to output.
*/
tsize_t t2p_write_pdf_catalog(T2P* t2p, TIFF* output)
{
tsize_t written = 0;
char buffer[32];
int buflen = 0;
written += t2pWriteFile(output,
(tdata_t)"<< \n/Type /Catalog \n/Pages ",
27);
buflen = snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_pages);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer,
TIFFmin((size_t)buflen, sizeof(buffer) - 1));
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
if(t2p->pdf_fitwindow){
written += t2pWriteFile(output,
(tdata_t) "/ViewerPreferences <</FitWindow true>>\n",
39);
}
written += t2pWriteFile(output, (tdata_t)">>\n", 3);
return(written);
}
/*
This function writes the PDF Info structure to output.
*/
tsize_t t2p_write_pdf_info(T2P* t2p, TIFF* input, TIFF* output)
{
tsize_t written = 0;
char* info;
char buffer[512];
if(t2p->pdf_datetime[0] == '\0')
t2p_pdf_tifftime(t2p, input);
if (strlen(t2p->pdf_datetime) > 0) {
written += t2pWriteFile(output, (tdata_t) "<< \n/CreationDate ", 18);
written += t2p_write_pdf_string(t2p->pdf_datetime, output);
written += t2pWriteFile(output, (tdata_t) "\n/ModDate ", 10);
written += t2p_write_pdf_string(t2p->pdf_datetime, output);
}
written += t2pWriteFile(output, (tdata_t) "\n/Producer ", 11);
snprintf(buffer, sizeof(buffer), "libtiff / tiff2pdf - %d", TIFFLIB_VERSION);
written += t2p_write_pdf_string(buffer, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
if (t2p->pdf_creator[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Creator ", 9);
written += t2p_write_pdf_string(t2p->pdf_creator, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if (TIFFGetField(input, TIFFTAG_SOFTWARE, &info) != 0 && info) {
if(strlen(info) >= sizeof(t2p->pdf_creator))
info[sizeof(t2p->pdf_creator) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Creator ", 9);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_author[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Author ", 8);
written += t2p_write_pdf_string(t2p->pdf_author, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if ((TIFFGetField(input, TIFFTAG_ARTIST, &info) != 0
|| TIFFGetField(input, TIFFTAG_COPYRIGHT, &info) != 0)
&& info) {
if (strlen(info) >= sizeof(t2p->pdf_author))
info[sizeof(t2p->pdf_author) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Author ", 8);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_title[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Title ", 7);
written += t2p_write_pdf_string(t2p->pdf_title, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if (TIFFGetField(input, TIFFTAG_DOCUMENTNAME, &info) != 0){
if(strlen(info) > 511) {
info[512] = '\0';
}
written += t2pWriteFile(output, (tdata_t) "/Title ", 7);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_subject[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Subject ", 9);
written += t2p_write_pdf_string(t2p->pdf_subject, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if (TIFFGetField(input, TIFFTAG_IMAGEDESCRIPTION, &info) != 0 && info) {
if (strlen(info) >= sizeof(t2p->pdf_subject))
info[sizeof(t2p->pdf_subject) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Subject ", 9);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_keywords[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Keywords ", 10);
written += t2p_write_pdf_string(t2p->pdf_keywords, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
written += t2pWriteFile(output, (tdata_t) ">> \n", 4);
return(written);
}
/*
* This function fills a string of a T2P struct with the current time as a PDF
* date string, it is called by t2p_pdf_tifftime.
*/
void t2p_pdf_currenttime(T2P* t2p)
{
struct tm* currenttime;
time_t timenow;
if (time(&timenow) == (time_t) -1) {
TIFFError(TIFF2PDF_MODULE,
"Can't get the current time: %s", strerror(errno));
timenow = (time_t) 0;
}
currenttime = localtime(&timenow);
snprintf(t2p->pdf_datetime, sizeof(t2p->pdf_datetime),
"D:%.4d%.2d%.2d%.2d%.2d%.2d",
(currenttime->tm_year + 1900) % 65536,
(currenttime->tm_mon + 1) % 256,
(currenttime->tm_mday) % 256,
(currenttime->tm_hour) % 256,
(currenttime->tm_min) % 256,
(currenttime->tm_sec) % 256);
return;
}
/*
* This function fills a string of a T2P struct with the date and time of a
* TIFF file if it exists or the current time as a PDF date string.
*/
void t2p_pdf_tifftime(T2P* t2p, TIFF* input)
{
char* datetime;
if (TIFFGetField(input, TIFFTAG_DATETIME, &datetime) != 0
&& (strlen(datetime) >= 19) ){
t2p->pdf_datetime[0]='D';
t2p->pdf_datetime[1]=':';
t2p->pdf_datetime[2]=datetime[0];
t2p->pdf_datetime[3]=datetime[1];
t2p->pdf_datetime[4]=datetime[2];
t2p->pdf_datetime[5]=datetime[3];
t2p->pdf_datetime[6]=datetime[5];
t2p->pdf_datetime[7]=datetime[6];
t2p->pdf_datetime[8]=datetime[8];
t2p->pdf_datetime[9]=datetime[9];
t2p->pdf_datetime[10]=datetime[11];
t2p->pdf_datetime[11]=datetime[12];
t2p->pdf_datetime[12]=datetime[14];
t2p->pdf_datetime[13]=datetime[15];
t2p->pdf_datetime[14]=datetime[17];
t2p->pdf_datetime[15]=datetime[18];
t2p->pdf_datetime[16] = '\0';
} else {
t2p_pdf_currenttime(t2p);
}
return;
}
/*
* This function writes a PDF Pages Tree structure to output.
*/
tsize_t t2p_write_pdf_pages(T2P* t2p, TIFF* output)
{
tsize_t written=0;
tdir_t i=0;
char buffer[32];
int buflen=0;
int page=0;
written += t2pWriteFile(output,
(tdata_t) "<< \n/Type /Pages \n/Kids [ ", 26);
page = t2p->pdf_pages+1;
for (i=0;i<t2p->tiff_pagecount;i++){
buflen=snprintf(buffer, sizeof(buffer), "%d", page);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
if ( ((i+1)%8)==0 ) {
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
page +=3;
page += t2p->tiff_pages[i].page_extra;
if(t2p->tiff_pages[i].page_tilecount>0){
page += (2 * t2p->tiff_pages[i].page_tilecount);
} else {
page +=2;
}
}
written += t2pWriteFile(output, (tdata_t) "] \n/Count ", 10);
buflen=snprintf(buffer, sizeof(buffer), "%d", t2p->tiff_pagecount);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " \n>> \n", 6);
return(written);
}
/*
This function writes a PDF Page structure to output.
*/
tsize_t t2p_write_pdf_page(uint32 object, T2P* t2p, TIFF* output){
unsigned int i=0;
tsize_t written=0;
char buffer[256];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "<<\n/Type /Page \n/Parent ", 24);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_pages);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
written += t2pWriteFile(output, (tdata_t) "/MediaBox [", 11);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.x1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.y1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.x2);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.y2);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "] \n", 3);
written += t2pWriteFile(output, (tdata_t) "/Contents ", 10);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(object + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
written += t2pWriteFile(output, (tdata_t) "/Resources << \n", 15);
if( t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount != 0 ){
written += t2pWriteFile(output, (tdata_t) "/XObject <<\n", 12);
for(i=0;i<t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount;i++){
written += t2pWriteFile(output, (tdata_t) "/Im", 3);
buflen = snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "_", 1);
buflen = snprintf(buffer, sizeof(buffer), "%u", i+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(object+3+(2*i)+t2p->tiff_pages[t2p->pdf_page].page_extra));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
if(i%4==3){
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
} else {
written += t2pWriteFile(output, (tdata_t) "/XObject <<\n", 12);
written += t2pWriteFile(output, (tdata_t) "/Im", 3);
buflen = snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(object+3+(2*i)+t2p->tiff_pages[t2p->pdf_page].page_extra));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
}
if(t2p->tiff_transferfunctioncount != 0) {
written += t2pWriteFile(output, (tdata_t) "/ExtGState <<", 13);
t2pWriteFile(output, (tdata_t) "/GS1 ", 5);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(object + 3));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
written += t2pWriteFile(output, (tdata_t) ">> \n", 4);
}
written += t2pWriteFile(output, (tdata_t) "/ProcSet [ ", 11);
if(t2p->pdf_colorspace & T2P_CS_BILEVEL
|| t2p->pdf_colorspace & T2P_CS_GRAY
){
written += t2pWriteFile(output, (tdata_t) "/ImageB ", 8);
} else {
written += t2pWriteFile(output, (tdata_t) "/ImageC ", 8);
if(t2p->pdf_colorspace & T2P_CS_PALETTE){
written += t2pWriteFile(output, (tdata_t) "/ImageI ", 8);
}
}
written += t2pWriteFile(output, (tdata_t) "]\n>>\n>>\n", 8);
return(written);
}
/*
This function composes the page size and image and tile locations on a page.
*/
void t2p_compose_pdf_page(T2P* t2p){
uint32 i=0;
uint32 i2=0;
T2P_TILE* tiles=NULL;
T2P_BOX* boxp=NULL;
uint32 tilecountx=0;
uint32 tilecounty=0;
uint32 tilewidth=0;
uint32 tilelength=0;
int istiled=0;
float f=0;
float width_ratio=0;
float length_ratio=0;
t2p->pdf_xres = t2p->tiff_xres;
t2p->pdf_yres = t2p->tiff_yres;
if(t2p->pdf_overrideres) {
t2p->pdf_xres = t2p->pdf_defaultxres;
t2p->pdf_yres = t2p->pdf_defaultyres;
}
if(t2p->pdf_xres == 0.0)
t2p->pdf_xres = t2p->pdf_defaultxres;
if(t2p->pdf_yres == 0.0)
t2p->pdf_yres = t2p->pdf_defaultyres;
if (t2p->pdf_image_fillpage) {
width_ratio = t2p->pdf_defaultpagewidth/t2p->tiff_width;
length_ratio = t2p->pdf_defaultpagelength/t2p->tiff_length;
if (width_ratio < length_ratio ) {
t2p->pdf_imagewidth = t2p->pdf_defaultpagewidth;
t2p->pdf_imagelength = t2p->tiff_length * width_ratio;
} else {
t2p->pdf_imagewidth = t2p->tiff_width * length_ratio;
t2p->pdf_imagelength = t2p->pdf_defaultpagelength;
}
} else if (t2p->tiff_resunit != RESUNIT_CENTIMETER /* RESUNIT_NONE and */
&& t2p->tiff_resunit != RESUNIT_INCH) { /* other cases */
t2p->pdf_imagewidth = ((float)(t2p->tiff_width))/t2p->pdf_xres;
t2p->pdf_imagelength = ((float)(t2p->tiff_length))/t2p->pdf_yres;
} else {
t2p->pdf_imagewidth =
((float)(t2p->tiff_width))*PS_UNIT_SIZE/t2p->pdf_xres;
t2p->pdf_imagelength =
((float)(t2p->tiff_length))*PS_UNIT_SIZE/t2p->pdf_yres;
}
if(t2p->pdf_overridepagesize != 0) {
t2p->pdf_pagewidth = t2p->pdf_defaultpagewidth;
t2p->pdf_pagelength = t2p->pdf_defaultpagelength;
} else {
t2p->pdf_pagewidth = t2p->pdf_imagewidth;
t2p->pdf_pagelength = t2p->pdf_imagelength;
}
t2p->pdf_mediabox.x1=0.0;
t2p->pdf_mediabox.y1=0.0;
t2p->pdf_mediabox.x2=t2p->pdf_pagewidth;
t2p->pdf_mediabox.y2=t2p->pdf_pagelength;
t2p->pdf_imagebox.x1=0.0;
t2p->pdf_imagebox.y1=0.0;
t2p->pdf_imagebox.x2=t2p->pdf_imagewidth;
t2p->pdf_imagebox.y2=t2p->pdf_imagelength;
if(t2p->pdf_overridepagesize!=0){
t2p->pdf_imagebox.x1+=((t2p->pdf_pagewidth-t2p->pdf_imagewidth)/2.0F);
t2p->pdf_imagebox.y1+=((t2p->pdf_pagelength-t2p->pdf_imagelength)/2.0F);
t2p->pdf_imagebox.x2+=((t2p->pdf_pagewidth-t2p->pdf_imagewidth)/2.0F);
t2p->pdf_imagebox.y2+=((t2p->pdf_pagelength-t2p->pdf_imagelength)/2.0F);
}
if(t2p->tiff_orientation > 4){
f=t2p->pdf_mediabox.x2;
t2p->pdf_mediabox.x2=t2p->pdf_mediabox.y2;
t2p->pdf_mediabox.y2=f;
}
istiled=((t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecount==0) ? 0 : 1;
if(istiled==0){
t2p_compose_pdf_page_orient(&(t2p->pdf_imagebox), t2p->tiff_orientation);
return;
} else {
tilewidth=(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilewidth;
tilelength=(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilelength;
if( tilewidth > INT_MAX ||
tilelength > INT_MAX ||
t2p->tiff_width > INT_MAX - tilewidth ||
t2p->tiff_length > INT_MAX - tilelength )
{
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
tilecountx=(t2p->tiff_width +
tilewidth -1)/
tilewidth;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecountx=tilecountx;
tilecounty=(t2p->tiff_length +
tilelength -1)/
tilelength;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecounty=tilecounty;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_edgetilewidth=
t2p->tiff_width % tilewidth;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_edgetilelength=
t2p->tiff_length % tilelength;
tiles=(t2p->tiff_tiles[t2p->pdf_page]).tiles_tiles;
for(i2=0;i2<tilecounty-1;i2++){
for(i=0;i<tilecountx-1;i++){
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * (i+1) * tilewidth)
/ (float)t2p->tiff_width);
boxp->y1 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * (i2+1) * tilelength)
/ (float)t2p->tiff_length);
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 = t2p->pdf_imagebox.x2;
boxp->y1 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * (i2+1) * tilelength)
/ (float)t2p->tiff_length);
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
for(i=0;i<tilecountx-1;i++){
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * (i+1) * tilewidth)
/ (float)t2p->tiff_width);
boxp->y1 = t2p->pdf_imagebox.y1;
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 = t2p->pdf_imagebox.x2;
boxp->y1 = t2p->pdf_imagebox.y1;
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
if(t2p->tiff_orientation==0 || t2p->tiff_orientation==1){
for(i=0;i<(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecount;i++){
t2p_compose_pdf_page_orient( &(tiles[i].tile_box) , 0);
}
return;
}
for(i=0;i<(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecount;i++){
boxp=&(tiles[i].tile_box);
boxp->x1 -= t2p->pdf_imagebox.x1;
boxp->x2 -= t2p->pdf_imagebox.x1;
boxp->y1 -= t2p->pdf_imagebox.y1;
boxp->y2 -= t2p->pdf_imagebox.y1;
if(t2p->tiff_orientation==2 || t2p->tiff_orientation==3){
boxp->x1 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x1;
boxp->x2 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x2;
}
if(t2p->tiff_orientation==3 || t2p->tiff_orientation==4){
boxp->y1 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y1;
boxp->y2 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y2;
}
if(t2p->tiff_orientation==8 || t2p->tiff_orientation==5){
boxp->y1 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y1;
boxp->y2 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y2;
}
if(t2p->tiff_orientation==5 || t2p->tiff_orientation==6){
boxp->x1 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x1;
boxp->x2 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x2;
}
if(t2p->tiff_orientation > 4){
f=boxp->x1;
boxp->x1 = boxp->y1;
boxp->y1 = f;
f=boxp->x2;
boxp->x2 = boxp->y2;
boxp->y2 = f;
t2p_compose_pdf_page_orient_flip(boxp, t2p->tiff_orientation);
} else {
t2p_compose_pdf_page_orient(boxp, t2p->tiff_orientation);
}
}
return;
}
void t2p_compose_pdf_page_orient(T2P_BOX* boxp, uint16 orientation){
float m1[9];
float f=0.0;
if( boxp->x1 > boxp->x2){
f=boxp->x1;
boxp->x1=boxp->x2;
boxp->x2 = f;
}
if( boxp->y1 > boxp->y2){
f=boxp->y1;
boxp->y1=boxp->y2;
boxp->y2 = f;
}
boxp->mat[0]=m1[0]=boxp->x2-boxp->x1;
boxp->mat[1]=m1[1]=0.0;
boxp->mat[2]=m1[2]=0.0;
boxp->mat[3]=m1[3]=0.0;
boxp->mat[4]=m1[4]=boxp->y2-boxp->y1;
boxp->mat[5]=m1[5]=0.0;
boxp->mat[6]=m1[6]=boxp->x1;
boxp->mat[7]=m1[7]=boxp->y1;
boxp->mat[8]=m1[8]=1.0;
switch(orientation){
case 0:
case 1:
break;
case 2:
boxp->mat[0]=0.0F-m1[0];
boxp->mat[6]+=m1[0];
break;
case 3:
boxp->mat[0]=0.0F-m1[0];
boxp->mat[4]=0.0F-m1[4];
boxp->mat[6]+=m1[0];
boxp->mat[7]+=m1[4];
break;
case 4:
boxp->mat[4]=0.0F-m1[4];
boxp->mat[7]+=m1[4];
break;
case 5:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[0];
boxp->mat[3]=0.0F-m1[4];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[4];
boxp->mat[7]+=m1[0];
break;
case 6:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[0];
boxp->mat[3]=m1[4];
boxp->mat[4]=0.0F;
boxp->mat[7]+=m1[0];
break;
case 7:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[0];
boxp->mat[3]=m1[4];
boxp->mat[4]=0.0F;
break;
case 8:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[0];
boxp->mat[3]=0.0F-m1[4];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[4];
break;
}
return;
}
void t2p_compose_pdf_page_orient_flip(T2P_BOX* boxp, uint16 orientation){
float m1[9];
float f=0.0;
if( boxp->x1 > boxp->x2){
f=boxp->x1;
boxp->x1=boxp->x2;
boxp->x2 = f;
}
if( boxp->y1 > boxp->y2){
f=boxp->y1;
boxp->y1=boxp->y2;
boxp->y2 = f;
}
boxp->mat[0]=m1[0]=boxp->x2-boxp->x1;
boxp->mat[1]=m1[1]=0.0F;
boxp->mat[2]=m1[2]=0.0F;
boxp->mat[3]=m1[3]=0.0F;
boxp->mat[4]=m1[4]=boxp->y2-boxp->y1;
boxp->mat[5]=m1[5]=0.0F;
boxp->mat[6]=m1[6]=boxp->x1;
boxp->mat[7]=m1[7]=boxp->y1;
boxp->mat[8]=m1[8]=1.0F;
switch(orientation){
case 5:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[4];
boxp->mat[3]=0.0F-m1[0];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[0];
boxp->mat[7]+=m1[4];
break;
case 6:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[4];
boxp->mat[3]=m1[0];
boxp->mat[4]=0.0F;
boxp->mat[7]+=m1[4];
break;
case 7:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[4];
boxp->mat[3]=m1[0];
boxp->mat[4]=0.0F;
break;
case 8:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[4];
boxp->mat[3]=0.0F-m1[0];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[0];
break;
}
return;
}
/*
This function writes a PDF Contents stream to output.
*/
tsize_t t2p_write_pdf_page_content_stream(T2P* t2p, TIFF* output){
tsize_t written=0;
ttile_t i=0;
char buffer[512];
int buflen=0;
T2P_BOX box;
if(t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount>0){
for(i=0;i<t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount; i++){
box=t2p->tiff_tiles[t2p->pdf_page].tiles_tiles[i].tile_box;
buflen=snprintf(buffer, sizeof(buffer),
"q %s %.4f %.4f %.4f %.4f %.4f %.4f cm /Im%d_%ld Do Q\n",
t2p->tiff_transferfunctioncount?"/GS1 gs ":"",
box.mat[0],
box.mat[1],
box.mat[3],
box.mat[4],
box.mat[6],
box.mat[7],
t2p->pdf_page + 1,
(long)(i + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2p_write_pdf_stream(buffer, buflen, output);
}
} else {
box=t2p->pdf_imagebox;
buflen=snprintf(buffer, sizeof(buffer),
"q %s %.4f %.4f %.4f %.4f %.4f %.4f cm /Im%d Do Q\n",
t2p->tiff_transferfunctioncount?"/GS1 gs ":"",
box.mat[0],
box.mat[1],
box.mat[3],
box.mat[4],
box.mat[6],
box.mat[7],
t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2p_write_pdf_stream(buffer, buflen, output);
}
return(written);
}
/*
This function writes a PDF Image XObject stream dictionary to output.
*/
tsize_t t2p_write_pdf_xobject_stream_dict(ttile_t tile,
T2P* t2p,
TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2p_write_pdf_stream_dict(0, t2p->pdf_xrefcount+1, output);
written += t2pWriteFile(output,
(tdata_t) "/Type /XObject \n/Subtype /Image \n/Name /Im",
42);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
if(tile != 0){
written += t2pWriteFile(output, (tdata_t) "_", 1);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)tile);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
written += t2pWriteFile(output, (tdata_t) "\n/Width ", 8);
if(tile==0){
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->tiff_width);
} else {
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)!=0){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
}
}
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Height ", 9);
if(tile==0){
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->tiff_length);
} else {
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)!=0){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
}
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/BitsPerComponent ", 19);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_bitspersample);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/ColorSpace ", 13);
written += t2p_write_pdf_xobject_cs(t2p, output);
if (t2p->pdf_image_interpolate)
written += t2pWriteFile(output,
(tdata_t) "\n/Interpolate true", 18);
if( (t2p->pdf_switchdecode != 0)
#ifdef CCITT_SUPPORT
&& ! (t2p->pdf_colorspace & T2P_CS_BILEVEL
&& t2p->pdf_compression == T2P_COMPRESS_G4)
#endif
){
written += t2p_write_pdf_xobject_decode(t2p, output);
}
written += t2p_write_pdf_xobject_stream_filter(tile, t2p, output);
return(written);
}
/*
* This function writes a PDF Image XObject Colorspace name to output.
*/
tsize_t t2p_write_pdf_xobject_cs(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[128];
int buflen=0;
float X_W=1.0;
float Y_W=1.0;
float Z_W=1.0;
if( (t2p->pdf_colorspace & T2P_CS_ICCBASED) != 0){
written += t2p_write_pdf_xobject_icccs(t2p, output);
return(written);
}
if( (t2p->pdf_colorspace & T2P_CS_PALETTE) != 0){
written += t2pWriteFile(output, (tdata_t) "[ /Indexed ", 11);
t2p->pdf_colorspace ^= T2P_CS_PALETTE;
written += t2p_write_pdf_xobject_cs(t2p, output);
t2p->pdf_colorspace |= T2P_CS_PALETTE;
buflen=snprintf(buffer, sizeof(buffer), "%u", (0x0001 << t2p->tiff_bitspersample)-1 );
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_palettecs );
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ]\n", 7);
return(written);
}
if(t2p->pdf_colorspace & T2P_CS_BILEVEL){
written += t2pWriteFile(output, (tdata_t) "/DeviceGray \n", 13);
}
if(t2p->pdf_colorspace & T2P_CS_GRAY){
if(t2p->pdf_colorspace & T2P_CS_CALGRAY){
written += t2p_write_pdf_xobject_calcs(t2p, output);
} else {
written += t2pWriteFile(output, (tdata_t) "/DeviceGray \n", 13);
}
}
if(t2p->pdf_colorspace & T2P_CS_RGB){
if(t2p->pdf_colorspace & T2P_CS_CALRGB){
written += t2p_write_pdf_xobject_calcs(t2p, output);
} else {
written += t2pWriteFile(output, (tdata_t) "/DeviceRGB \n", 12);
}
}
if(t2p->pdf_colorspace & T2P_CS_CMYK){
written += t2pWriteFile(output, (tdata_t) "/DeviceCMYK \n", 13);
}
if(t2p->pdf_colorspace & T2P_CS_LAB){
written += t2pWriteFile(output, (tdata_t) "[/Lab << \n", 10);
written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12);
X_W = t2p->tiff_whitechromaticities[0];
Y_W = t2p->tiff_whitechromaticities[1];
Z_W = 1.0F - (X_W + Y_W);
X_W /= Y_W;
Z_W /= Y_W;
Y_W = 1.0F;
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Range ", 7);
buflen=snprintf(buffer, sizeof(buffer), "[%d %d %d %d] \n",
t2p->pdf_labrange[0],
t2p->pdf_labrange[1],
t2p->pdf_labrange[2],
t2p->pdf_labrange[3]);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) ">>] \n", 5);
}
return(written);
}
tsize_t t2p_write_pdf_transfer(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "<< /Type /ExtGState \n/TR ", 25);
if(t2p->tiff_transferfunctioncount == 1){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
} else {
written += t2pWriteFile(output, (tdata_t) "[ ", 2);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 2));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 3));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
written += t2pWriteFile(output, (tdata_t) "/Identity ] ", 12);
}
written += t2pWriteFile(output, (tdata_t) " >> \n", 5);
return(written);
}
tsize_t t2p_write_pdf_transfer_dict(T2P* t2p, TIFF* output, uint16 i){
tsize_t written=0;
char buffer[32];
int buflen=0;
(void)i; /* XXX */
written += t2pWriteFile(output, (tdata_t) "/FunctionType 0 \n", 17);
written += t2pWriteFile(output, (tdata_t) "/Domain [0.0 1.0] \n", 19);
written += t2pWriteFile(output, (tdata_t) "/Range [0.0 1.0] \n", 18);
buflen=snprintf(buffer, sizeof(buffer), "/Size [%u] \n", (1<<t2p->tiff_bitspersample));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/BitsPerSample 16 \n", 19);
written += t2p_write_pdf_stream_dict(((tsize_t)1)<<(t2p->tiff_bitspersample+1), 0, output);
return(written);
}
tsize_t t2p_write_pdf_transfer_stream(T2P* t2p, TIFF* output, uint16 i){
tsize_t written=0;
written += t2p_write_pdf_stream(
t2p->tiff_transferfunction[i],
(((tsize_t)1)<<(t2p->tiff_bitspersample+1)),
output);
return(written);
}
/*
This function writes a PDF Image XObject Colorspace array to output.
*/
tsize_t t2p_write_pdf_xobject_calcs(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[256];
int buflen=0;
float X_W=0.0;
float Y_W=0.0;
float Z_W=0.0;
float X_R=0.0;
float Y_R=0.0;
float Z_R=0.0;
float X_G=0.0;
float Y_G=0.0;
float Z_G=0.0;
float X_B=0.0;
float Y_B=0.0;
float Z_B=0.0;
float x_w=0.0;
float y_w=0.0;
float z_w=0.0;
float x_r=0.0;
float y_r=0.0;
float x_g=0.0;
float y_g=0.0;
float x_b=0.0;
float y_b=0.0;
float R=1.0;
float G=1.0;
float B=1.0;
written += t2pWriteFile(output, (tdata_t) "[", 1);
if(t2p->pdf_colorspace & T2P_CS_CALGRAY){
written += t2pWriteFile(output, (tdata_t) "/CalGray ", 9);
X_W = t2p->tiff_whitechromaticities[0];
Y_W = t2p->tiff_whitechromaticities[1];
Z_W = 1.0F - (X_W + Y_W);
X_W /= Y_W;
Z_W /= Y_W;
Y_W = 1.0F;
}
if(t2p->pdf_colorspace & T2P_CS_CALRGB){
written += t2pWriteFile(output, (tdata_t) "/CalRGB ", 8);
x_w = t2p->tiff_whitechromaticities[0];
y_w = t2p->tiff_whitechromaticities[1];
x_r = t2p->tiff_primarychromaticities[0];
y_r = t2p->tiff_primarychromaticities[1];
x_g = t2p->tiff_primarychromaticities[2];
y_g = t2p->tiff_primarychromaticities[3];
x_b = t2p->tiff_primarychromaticities[4];
y_b = t2p->tiff_primarychromaticities[5];
z_w = y_w * ((x_g - x_b)*y_r - (x_r-x_b)*y_g + (x_r-x_g)*y_b);
Y_R = (y_r/R) * ((x_g-x_b)*y_w - (x_w-x_b)*y_g + (x_w-x_g)*y_b) / z_w;
X_R = Y_R * x_r / y_r;
Z_R = Y_R * (((1-x_r)/y_r)-1);
Y_G = ((0.0F-(y_g))/G) * ((x_r-x_b)*y_w - (x_w-x_b)*y_r + (x_w-x_r)*y_b) / z_w;
X_G = Y_G * x_g / y_g;
Z_G = Y_G * (((1-x_g)/y_g)-1);
Y_B = (y_b/B) * ((x_r-x_g)*y_w - (x_w-x_g)*y_r + (x_w-x_r)*y_g) / z_w;
X_B = Y_B * x_b / y_b;
Z_B = Y_B * (((1-x_b)/y_b)-1);
X_W = (X_R * R) + (X_G * G) + (X_B * B);
Y_W = (Y_R * R) + (Y_G * G) + (Y_B * B);
Z_W = (Z_R * R) + (Z_G * G) + (Z_B * B);
X_W /= Y_W;
Z_W /= Y_W;
Y_W = 1.0;
}
written += t2pWriteFile(output, (tdata_t) "<< \n", 4);
if(t2p->pdf_colorspace & T2P_CS_CALGRAY){
written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12);
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Gamma 2.2 \n", 12);
}
if(t2p->pdf_colorspace & T2P_CS_CALRGB){
written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12);
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Matrix ", 8);
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f] \n",
X_R, Y_R, Z_R,
X_G, Y_G, Z_G,
X_B, Y_B, Z_B);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Gamma [2.2 2.2 2.2] \n", 22);
}
written += t2pWriteFile(output, (tdata_t) ">>] \n", 5);
return(written);
}
/*
This function writes a PDF Image XObject Colorspace array to output.
*/
tsize_t t2p_write_pdf_xobject_icccs(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "[/ICCBased ", 11);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_icccs);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R] \n", 7);
return(written);
}
tsize_t t2p_write_pdf_xobject_icccs_dict(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "/N ", 3);
buflen=snprintf(buffer, sizeof(buffer), "%u \n", t2p->tiff_samplesperpixel);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Alternate ", 11);
t2p->pdf_colorspace ^= T2P_CS_ICCBASED;
written += t2p_write_pdf_xobject_cs(t2p, output);
t2p->pdf_colorspace |= T2P_CS_ICCBASED;
written += t2p_write_pdf_stream_dict(t2p->tiff_iccprofilelength, 0, output);
return(written);
}
tsize_t t2p_write_pdf_xobject_icccs_stream(T2P* t2p, TIFF* output){
tsize_t written=0;
written += t2p_write_pdf_stream(
(tdata_t) t2p->tiff_iccprofile,
(tsize_t) t2p->tiff_iccprofilelength,
output);
return(written);
}
/*
This function writes a palette stream for an indexed color space to output.
*/
tsize_t t2p_write_pdf_xobject_palettecs_stream(T2P* t2p, TIFF* output){
tsize_t written=0;
written += t2p_write_pdf_stream(
(tdata_t) t2p->pdf_palette,
(tsize_t) t2p->pdf_palettesize,
output);
return(written);
}
/*
This function writes a PDF Image XObject Decode array to output.
*/
tsize_t t2p_write_pdf_xobject_decode(T2P* t2p, TIFF* output){
tsize_t written=0;
int i=0;
written += t2pWriteFile(output, (tdata_t) "/Decode [ ", 10);
for (i=0;i<t2p->tiff_samplesperpixel;i++){
written += t2pWriteFile(output, (tdata_t) "1 0 ", 4);
}
written += t2pWriteFile(output, (tdata_t) "]\n", 2);
return(written);
}
/*
This function writes a PDF Image XObject stream filter name and parameters to
output.
*/
tsize_t t2p_write_pdf_xobject_stream_filter(ttile_t tile, T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
if(t2p->pdf_compression==T2P_COMPRESS_NONE){
return(written);
}
written += t2pWriteFile(output, (tdata_t) "/Filter ", 8);
switch(t2p->pdf_compression){
#ifdef CCITT_SUPPORT
case T2P_COMPRESS_G4:
written += t2pWriteFile(output, (tdata_t) "/CCITTFaxDecode ", 16);
written += t2pWriteFile(output, (tdata_t) "/DecodeParms ", 13);
written += t2pWriteFile(output, (tdata_t) "<< /K -1 ", 9);
if(tile==0){
written += t2pWriteFile(output, (tdata_t) "/Columns ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_width);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /Rows ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_length);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
} else {
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)==0){
written += t2pWriteFile(output, (tdata_t) "/Columns ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
} else {
written += t2pWriteFile(output, (tdata_t) "/Columns ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)==0){
written += t2pWriteFile(output, (tdata_t) " /Rows ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
} else {
written += t2pWriteFile(output, (tdata_t) " /Rows ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
}
if(t2p->pdf_switchdecode == 0){
written += t2pWriteFile(output, (tdata_t) " /BlackIs1 true ", 16);
}
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
break;
#endif
#ifdef JPEG_SUPPORT
case T2P_COMPRESS_JPEG:
written += t2pWriteFile(output, (tdata_t) "/DCTDecode ", 11);
if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR) {
written += t2pWriteFile(output, (tdata_t) "/DecodeParms ", 13);
written += t2pWriteFile(output, (tdata_t) "<< /ColorTransform 1 >>\n", 24);
}
break;
#endif
#ifdef ZIP_SUPPORT
case T2P_COMPRESS_ZIP:
written += t2pWriteFile(output, (tdata_t) "/FlateDecode ", 13);
if(t2p->pdf_compressionquality%100){
written += t2pWriteFile(output, (tdata_t) "/DecodeParms ", 13);
written += t2pWriteFile(output, (tdata_t) "<< /Predictor ", 14);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_compressionquality%100);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /Columns ", 10);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_width);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /Colors ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_samplesperpixel);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /BitsPerComponent ", 19);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_bitspersample);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
}
break;
#endif
default:
break;
}
return(written);
}
/*
This function writes a PDF xref table to output.
*/
tsize_t t2p_write_pdf_xreftable(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[64];
int buflen=0;
uint32 i=0;
written += t2pWriteFile(output, (tdata_t) "xref\n0 ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(t2p->pdf_xrefcount + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " \n0000000000 65535 f \n", 22);
for (i=0;i<t2p->pdf_xrefcount;i++){
snprintf(buffer, sizeof(buffer), "%.10lu 00000 n \n",
(unsigned long)t2p->pdf_xrefoffsets[i]);
written += t2pWriteFile(output, (tdata_t) buffer, 20);
}
return(written);
}
/*
* This function writes a PDF trailer to output.
*/
tsize_t t2p_write_pdf_trailer(T2P* t2p, TIFF* output)
{
tsize_t written = 0;
char buffer[32];
int buflen = 0;
size_t i = 0;
for (i = 0; i < sizeof(t2p->pdf_fileid) - 8; i += 8)
snprintf(t2p->pdf_fileid + i, 9, "%.8X", rand());
written += t2pWriteFile(output, (tdata_t) "trailer\n<<\n/Size ", 17);
buflen = snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(t2p->pdf_xrefcount+1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Root ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_catalog);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/Info ", 12);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_info);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/ID[<", 11);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) "><", 2);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) ">]\n>>\nstartxref\n", 16);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_startxref);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n%%EOF\n", 7);
return(written);
}
/*
This function writes a PDF to a file given a pointer to a TIFF.
The idea with using a TIFF* as output for a PDF file is that the file
can be created with TIFFClientOpen for memory-mapped use within the TIFF
library, and TIFFWriteEncodedStrip can be used to write compressed data to
the output. The output is not actually a TIFF file, it is a PDF file.
This function uses only t2pWriteFile and TIFFWriteEncodedStrip to write to
the output TIFF file. When libtiff would otherwise be writing data to the
output file, the write procedure of the TIFF structure is replaced with an
empty implementation.
The first argument to the function is an initialized and validated T2P
context struct pointer.
The second argument to the function is the TIFF* that is the input that has
been opened for reading and no other functions have been called upon it.
The third argument to the function is the TIFF* that is the output that has
been opened for writing. It has to be opened so that it hasn't written any
data to the output. If the output is seekable then it's OK to seek to the
beginning of the file. The function only writes to the output PDF and does
not seek. See the example usage in the main() function.
TIFF* output = TIFFOpen("output.pdf", "w");
assert(output != NULL);
if(output->tif_seekproc != NULL){
t2pSeekFile(output, (toff_t) 0, SEEK_SET);
}
This function returns the file size of the output PDF file. On error it
returns zero and the t2p->t2p_error variable is set to T2P_ERR_ERROR.
After this function completes, call t2p_free on t2p, TIFFClose on input,
and TIFFClose on output.
*/
tsize_t t2p_write_pdf(T2P* t2p, TIFF* input, TIFF* output){
tsize_t written=0;
ttile_t i2=0;
tsize_t streamlen=0;
uint16 i=0;
t2p_read_tiff_init(t2p, input);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
t2p->pdf_xrefoffsets= (uint32*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_xrefcount,sizeof(uint32)) );
if(t2p->pdf_xrefoffsets==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_write_pdf",
(unsigned int) (t2p->pdf_xrefcount * sizeof(uint32)) );
t2p->t2p_error = T2P_ERR_ERROR;
return(written);
}
t2p->pdf_xrefcount=0;
t2p->pdf_catalog=1;
t2p->pdf_info=2;
t2p->pdf_pages=3;
written += t2p_write_pdf_header(t2p, output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_catalog=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_catalog(t2p, output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_info=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_info(t2p, input, output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_pages=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_pages(t2p, output);
written += t2p_write_pdf_obj_end(output);
for(t2p->pdf_page=0;t2p->pdf_page<t2p->tiff_pagecount;t2p->pdf_page++){
t2p_read_tiff_data(t2p, input);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_page(t2p->pdf_xrefcount, t2p, output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_stream_dict(0, t2p->pdf_xrefcount+1, output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
streamlen=written;
written += t2p_write_pdf_page_content_stream(t2p, output);
streamlen=written-streamlen;
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_length(streamlen, output);
written += t2p_write_pdf_obj_end(output);
if(t2p->tiff_transferfunctioncount != 0){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_transfer(t2p, output);
written += t2p_write_pdf_obj_end(output);
for(i=0; i < t2p->tiff_transferfunctioncount; i++){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_transfer_dict(t2p, output, i);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
/* streamlen=written; */ /* value not used */
written += t2p_write_pdf_transfer_stream(t2p, output, i);
/* streamlen=written-streamlen; */ /* value not used */
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
}
}
if( (t2p->pdf_colorspace & T2P_CS_PALETTE) != 0){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_palettecs=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_stream_dict(t2p->pdf_palettesize, 0, output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
/* streamlen=written; */ /* value not used */
written += t2p_write_pdf_xobject_palettecs_stream(t2p, output);
/* streamlen=written-streamlen; */ /* value not used */
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
}
if( (t2p->pdf_colorspace & T2P_CS_ICCBASED) != 0){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_icccs=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_xobject_icccs_dict(t2p, output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
/* streamlen=written; */ /* value not used */
written += t2p_write_pdf_xobject_icccs_stream(t2p, output);
/* streamlen=written-streamlen; */ /* value not used */
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
}
if(t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount !=0){
for(i2=0;i2<t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount;i2++){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_xobject_stream_dict(
i2+1,
t2p,
output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
streamlen=written;
t2p_read_tiff_size_tile(t2p, input, i2);
written += t2p_readwrite_pdf_image_tile(t2p, input, output, i2);
t2p_write_advance_directory(t2p, output);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
streamlen=written-streamlen;
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_length(streamlen, output);
written += t2p_write_pdf_obj_end(output);
}
} else {
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_xobject_stream_dict(
0,
t2p,
output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
streamlen=written;
t2p_read_tiff_size(t2p, input);
written += t2p_readwrite_pdf_image(t2p, input, output);
t2p_write_advance_directory(t2p, output);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
streamlen=written-streamlen;
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_length(streamlen, output);
written += t2p_write_pdf_obj_end(output);
}
}
t2p->pdf_startxref = written;
written += t2p_write_pdf_xreftable(t2p, output);
written += t2p_write_pdf_trailer(t2p, output);
t2p_disable(output);
return(written);
}
/* vim: set ts=8 sts=8 sw=8 noet: */
/*
* Local Variables:
* mode: c
* c-basic-offset: 8
* fill-column: 78
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_4800_1 |
crossvul-cpp_data_bad_2201_0 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* plugins/kdb/ldap/libkdb_ldap/ldap_principal2.c */
/*
* Copyright (c) 2004-2005, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <time.h>
#include "ldap_main.h"
#include "kdb_ldap.h"
#include "ldap_principal.h"
#include "princ_xdr.h"
#include "ldap_tkt_policy.h"
#include "ldap_pwd_policy.h"
#include "ldap_err.h"
#include <kadm5/admin.h>
extern char* principal_attributes[];
extern char* max_pwd_life_attr[];
static char *
getstringtime(krb5_timestamp);
krb5_error_code
berval2tl_data(struct berval *in, krb5_tl_data **out)
{
*out = (krb5_tl_data *) malloc (sizeof (krb5_tl_data));
if (*out == NULL)
return ENOMEM;
(*out)->tl_data_length = in->bv_len - 2;
(*out)->tl_data_contents = (krb5_octet *) malloc
((*out)->tl_data_length * sizeof (krb5_octet));
if ((*out)->tl_data_contents == NULL) {
free (*out);
return ENOMEM;
}
UNSTORE16_INT (in->bv_val, (*out)->tl_data_type);
memcpy ((*out)->tl_data_contents, in->bv_val + 2, (*out)->tl_data_length);
return 0;
}
/*
* look up a principal in the directory.
*/
krb5_error_code
krb5_ldap_get_principal(krb5_context context, krb5_const_principal searchfor,
unsigned int flags, krb5_db_entry **entry_ptr)
{
char *user=NULL, *filter=NULL, *filtuser=NULL;
unsigned int tree=0, ntrees=1, princlen=0;
krb5_error_code tempst=0, st=0;
char **values=NULL, **subtree=NULL, *cname=NULL;
LDAP *ld=NULL;
LDAPMessage *result=NULL, *ent=NULL;
krb5_ldap_context *ldap_context=NULL;
kdb5_dal_handle *dal_handle=NULL;
krb5_ldap_server_handle *ldap_server_handle=NULL;
krb5_principal cprinc=NULL;
krb5_boolean found=FALSE;
krb5_db_entry *entry = NULL;
*entry_ptr = NULL;
/* Clear the global error string */
krb5_clear_error_message(context);
if (searchfor == NULL)
return EINVAL;
dal_handle = context->dal_handle;
ldap_context = (krb5_ldap_context *) dal_handle->db_context;
CHECK_LDAP_HANDLE(ldap_context);
if (is_principal_in_realm(ldap_context, searchfor) != 0) {
st = KRB5_KDB_NOENTRY;
krb5_set_error_message(context, st,
_("Principal does not belong to realm"));
goto cleanup;
}
if ((st=krb5_unparse_name(context, searchfor, &user)) != 0)
goto cleanup;
if ((st=krb5_ldap_unparse_principal_name(user)) != 0)
goto cleanup;
filtuser = ldap_filter_correct(user);
if (filtuser == NULL) {
st = ENOMEM;
goto cleanup;
}
princlen = strlen(FILTER) + strlen(filtuser) + 2 + 1; /* 2 for closing brackets */
if ((filter = malloc(princlen)) == NULL) {
st = ENOMEM;
goto cleanup;
}
snprintf(filter, princlen, FILTER"%s))", filtuser);
if ((st = krb5_get_subtree_info(ldap_context, &subtree, &ntrees)) != 0)
goto cleanup;
GET_HANDLE();
for (tree=0; tree < ntrees && !found; ++tree) {
LDAP_SEARCH(subtree[tree], ldap_context->lrparams->search_scope, filter, principal_attributes);
for (ent=ldap_first_entry(ld, result); ent != NULL && !found; ent=ldap_next_entry(ld, ent)) {
/* get the associated directory user information */
if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) {
int i;
/* a wild-card in a principal name can return a list of kerberos principals.
* Make sure that the correct principal is returned.
* NOTE: a principalname k* in ldap server will return all the principals starting with a k
*/
for (i=0; values[i] != NULL; ++i) {
if (strcmp(values[i], user) == 0) {
found = TRUE;
break;
}
}
ldap_value_free(values);
if (!found) /* no matching principal found */
continue;
}
if ((values=ldap_get_values(ld, ent, "krbcanonicalname")) != NULL) {
if (values[0] && strcmp(values[0], user) != 0) {
/* We matched an alias, not the canonical name. */
if (flags & KRB5_KDB_FLAG_ALIAS_OK) {
st = krb5_ldap_parse_principal_name(values[0], &cname);
if (st != 0)
goto cleanup;
st = krb5_parse_name(context, cname, &cprinc);
if (st != 0)
goto cleanup;
} else /* No canonicalization, so don't return aliases. */
found = FALSE;
}
ldap_value_free(values);
if (!found)
continue;
}
entry = k5alloc(sizeof(*entry), &st);
if (entry == NULL)
goto cleanup;
if ((st = populate_krb5_db_entry(context, ldap_context, ld, ent,
cprinc ? cprinc : searchfor,
entry)) != 0)
goto cleanup;
}
ldap_msgfree(result);
result = NULL;
} /* for (tree=0 ... */
if (found) {
*entry_ptr = entry;
entry = NULL;
} else
st = KRB5_KDB_NOENTRY;
cleanup:
ldap_msgfree(result);
krb5_ldap_free_principal(context, entry);
if (filter)
free (filter);
if (subtree) {
for (; ntrees; --ntrees)
if (subtree[ntrees-1])
free (subtree[ntrees-1]);
free (subtree);
}
if (ldap_server_handle)
krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
if (user)
free(user);
if (filtuser)
free(filtuser);
if (cname)
free(cname);
if (cprinc)
krb5_free_principal(context, cprinc);
return st;
}
typedef enum{ ADD_PRINCIPAL, MODIFY_PRINCIPAL } OPERATION;
/*
* ptype is creating confusions. Additionally the logic
* surronding ptype is redundunt and can be achevied
* with the help of dn and containerdn members.
* so dropping the ptype member
*/
typedef struct _xargs_t {
char *dn;
char *linkdn;
krb5_boolean dn_from_kbd;
char *containerdn;
char *tktpolicydn;
}xargs_t;
static void
free_xargs(xargs_t xargs)
{
if (xargs.dn)
free (xargs.dn);
if (xargs.linkdn)
free(xargs.linkdn);
if (xargs.containerdn)
free (xargs.containerdn);
if (xargs.tktpolicydn)
free (xargs.tktpolicydn);
}
static krb5_error_code
process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
OPERATION optype)
{
int i=0;
krb5_error_code st=0;
char *arg=NULL, *arg_val=NULL;
char **dptr=NULL;
unsigned int arg_val_len=0;
if (db_args) {
for (i=0; db_args[i]; ++i) {
arg = strtok_r(db_args[i], "=", &arg_val);
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
dptr = &xargs->tktpolicydn;
} else {
if (strcmp(arg, USERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL ||
xargs->linkdn != NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->dn;
} else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->containerdn;
} else if (strcmp(arg, LINKDN_ARG) == 0) {
if (xargs->dn != NULL || xargs->linkdn != NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->linkdn;
} else {
st = EINVAL;
krb5_set_error_message(context, st,
_("unknown option: %s"), arg);
goto cleanup;
}
xargs->dn_from_kbd = TRUE;
if (arg_val == NULL || strlen(arg_val) == 0) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option value missing"), arg);
goto cleanup;
}
}
if (arg_val == NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option value missing"), arg);
goto cleanup;
}
arg_val_len = strlen(arg_val) + 1;
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
if ((st = krb5_ldap_name_to_policydn (context,
arg_val,
dptr)) != 0)
goto cleanup;
} else {
*dptr = k5memdup(arg_val, arg_val_len, &st);
if (*dptr == NULL)
goto cleanup;
}
}
}
cleanup:
return st;
}
krb5int_access accessor;
static krb5_error_code
asn1_encode_sequence_of_keys(krb5_key_data *key_data, krb5_int16 n_key_data,
krb5_int32 mkvno, krb5_data **code)
{
krb5_error_code err;
ldap_seqof_key_data val;
/*
* This should be pushed back into other library initialization
* code.
*/
err = kldap_ensure_initialized ();
if (err)
return err;
val.key_data = key_data;
val.n_key_data = n_key_data;
val.mkvno = mkvno;
val.kvno = key_data[0].key_data_kvno;
return accessor.asn1_ldap_encode_sequence_of_keys(&val, code);
}
static krb5_error_code
asn1_decode_sequence_of_keys(krb5_data *in, krb5_key_data **out,
krb5_int16 *n_key_data, krb5_kvno *mkvno)
{
krb5_error_code err;
ldap_seqof_key_data *p;
int i;
/*
* This should be pushed back into other library initialization
* code.
*/
err = kldap_ensure_initialized ();
if (err)
return err;
err = accessor.asn1_ldap_decode_sequence_of_keys(in, &p);
if (err)
return err;
/* Set kvno and key_data_ver in each key_data element. */
for (i = 0; i < p->n_key_data; i++) {
p->key_data[i].key_data_kvno = p->kvno;
/* The decoder sets key_data_ver to 1 if no salt is present, but leaves
* it at 0 if salt is present. */
if (p->key_data[i].key_data_ver == 0)
p->key_data[i].key_data_ver = 2;
}
*out = p->key_data;
*n_key_data = p->n_key_data;
*mkvno = p->mkvno;
free(p);
return 0;
}
/* Decoding ASN.1 encoded key */
static struct berval **
krb5_encode_krbsecretkey(krb5_key_data *key_data_in, int n_key_data,
krb5_kvno mkvno) {
struct berval **ret = NULL;
int currkvno;
int num_versions = 1;
int i, j, last;
krb5_error_code err = 0;
krb5_key_data *key_data;
if (n_key_data <= 0)
return NULL;
/* Make a shallow copy of the key data so we can alter it. */
key_data = k5calloc(n_key_data, sizeof(*key_data), &err);
if (key_data_in == NULL)
goto cleanup;
memcpy(key_data, key_data_in, n_key_data * sizeof(*key_data));
/* Unpatched krb5 1.11 and 1.12 cannot decode KrbKey sequences with no salt
* field. For compatibility, always encode a salt field. */
for (i = 0; i < n_key_data; i++) {
if (key_data[i].key_data_ver == 1) {
key_data[i].key_data_ver = 2;
key_data[i].key_data_type[1] = KRB5_KDB_SALTTYPE_NORMAL;
key_data[i].key_data_length[1] = 0;
key_data[i].key_data_contents[1] = NULL;
}
}
/* Find the number of key versions */
for (i = 0; i < n_key_data - 1; i++)
if (key_data[i].key_data_kvno != key_data[i + 1].key_data_kvno)
num_versions++;
ret = (struct berval **) calloc (num_versions + 1, sizeof (struct berval *));
if (ret == NULL) {
err = ENOMEM;
goto cleanup;
}
for (i = 0, last = 0, j = 0, currkvno = key_data[0].key_data_kvno; i < n_key_data; i++) {
krb5_data *code;
if (i == n_key_data - 1 || key_data[i + 1].key_data_kvno != currkvno) {
ret[j] = k5alloc(sizeof(struct berval), &err);
if (ret[j] == NULL)
goto cleanup;
err = asn1_encode_sequence_of_keys(key_data + last,
(krb5_int16)i - last + 1,
mkvno, &code);
if (err)
goto cleanup;
/*CHECK_NULL(ret[j]); */
ret[j]->bv_len = code->length;
ret[j]->bv_val = code->data;
free(code);
j++;
last = i + 1;
currkvno = key_data[i].key_data_kvno;
}
}
ret[num_versions] = NULL;
cleanup:
free(key_data);
if (err != 0) {
if (ret != NULL) {
for (i = 0; i <= num_versions; i++)
if (ret[i] != NULL)
free (ret[i]);
free (ret);
ret = NULL;
}
}
return ret;
}
static krb5_error_code
tl_data2berval (krb5_tl_data *in, struct berval **out)
{
*out = (struct berval *) malloc (sizeof (struct berval));
if (*out == NULL)
return ENOMEM;
(*out)->bv_len = in->tl_data_length + 2;
(*out)->bv_val = (char *) malloc ((*out)->bv_len);
if ((*out)->bv_val == NULL) {
free (*out);
return ENOMEM;
}
STORE16_INT((*out)->bv_val, in->tl_data_type);
memcpy ((*out)->bv_val + 2, in->tl_data_contents, in->tl_data_length);
return 0;
}
krb5_error_code
krb5_ldap_put_principal(krb5_context context, krb5_db_entry *entry,
char **db_args)
{
int l=0, kerberos_principal_object_type=0;
unsigned int ntrees=0, tre=0;
krb5_error_code st=0, tempst=0;
LDAP *ld=NULL;
LDAPMessage *result=NULL, *ent=NULL;
char **subtreelist = NULL;
char *user=NULL, *subtree=NULL, *principal_dn=NULL;
char **values=NULL, *strval[10]={NULL}, errbuf[1024];
char *filtuser=NULL;
struct berval **bersecretkey=NULL;
LDAPMod **mods=NULL;
krb5_boolean create_standalone_prinicipal=FALSE;
krb5_boolean krb_identity_exists=FALSE, establish_links=FALSE;
char *standalone_principal_dn=NULL;
krb5_tl_data *tl_data=NULL;
krb5_key_data **keys=NULL;
kdb5_dal_handle *dal_handle=NULL;
krb5_ldap_context *ldap_context=NULL;
krb5_ldap_server_handle *ldap_server_handle=NULL;
osa_princ_ent_rec princ_ent = {0};
xargs_t xargs = {0};
char *polname = NULL;
OPERATION optype;
krb5_boolean found_entry = FALSE;
/* Clear the global error string */
krb5_clear_error_message(context);
SETUP_CONTEXT();
if (ldap_context->lrparams == NULL || ldap_context->container_dn == NULL)
return EINVAL;
/* get ldap handle */
GET_HANDLE();
if (is_principal_in_realm(ldap_context, entry->princ) != 0) {
st = EINVAL;
krb5_set_error_message(context, st, _("Principal does not belong to "
"the default realm"));
goto cleanup;
}
/* get the principal information to act on */
if (((st=krb5_unparse_name(context, entry->princ, &user)) != 0) ||
((st=krb5_ldap_unparse_principal_name(user)) != 0))
goto cleanup;
filtuser = ldap_filter_correct(user);
if (filtuser == NULL) {
st = ENOMEM;
goto cleanup;
}
/* Identity the type of operation, it can be
* add principal or modify principal.
* hack if the entry->mask has KRB_PRINCIPAL flag set
* then it is a add operation
*/
if (entry->mask & KADM5_PRINCIPAL)
optype = ADD_PRINCIPAL;
else
optype = MODIFY_PRINCIPAL;
if (((st=krb5_get_princ_type(context, entry, &kerberos_principal_object_type)) != 0) ||
((st=krb5_get_userdn(context, entry, &principal_dn)) != 0))
goto cleanup;
if ((st=process_db_args(context, db_args, &xargs, optype)) != 0)
goto cleanup;
if (entry->mask & KADM5_LOAD) {
unsigned int tree = 0;
int numlentries = 0;
char *filter = NULL;
/* A load operation is special, will do a mix-in (add krbprinc
* attrs to a non-krb object entry) if an object exists with a
* matching krbprincipalname attribute so try to find existing
* object and set principal_dn. This assumes that the
* krbprincipalname attribute is unique (only one object entry has
* a particular krbprincipalname attribute).
*/
if (asprintf(&filter, FILTER"%s))", filtuser) < 0) {
filter = NULL;
st = ENOMEM;
goto cleanup;
}
/* get the current subtree list */
if ((st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees)) != 0)
goto cleanup;
found_entry = FALSE;
/* search for entry with matching krbprincipalname attribute */
for (tree = 0; found_entry == FALSE && tree < ntrees; ++tree) {
if (principal_dn == NULL) {
LDAP_SEARCH_1(subtreelist[tree], ldap_context->lrparams->search_scope, filter, principal_attributes, IGNORE_STATUS);
} else {
/* just look for entry with principal_dn */
LDAP_SEARCH_1(principal_dn, LDAP_SCOPE_BASE, filter, principal_attributes, IGNORE_STATUS);
}
if (st == LDAP_SUCCESS) {
numlentries = ldap_count_entries(ld, result);
if (numlentries > 1) {
free(filter);
st = EINVAL;
krb5_set_error_message(context, st,
_("operation can not continue, "
"more than one entry with "
"principal name \"%s\" found"),
user);
goto cleanup;
} else if (numlentries == 1) {
found_entry = TRUE;
if (principal_dn == NULL) {
ent = ldap_first_entry(ld, result);
if (ent != NULL) {
/* setting principal_dn will cause that entry to be modified further down */
if ((principal_dn = ldap_get_dn(ld, ent)) == NULL) {
ldap_get_option (ld, LDAP_OPT_RESULT_CODE, &st);
st = set_ldap_error (context, st, 0);
free(filter);
goto cleanup;
}
}
}
}
} else if (st != LDAP_NO_SUCH_OBJECT) {
/* could not perform search, return with failure */
st = set_ldap_error (context, st, 0);
free(filter);
goto cleanup;
}
ldap_msgfree(result);
result = NULL;
/*
* If it isn't found then assume a standalone princ entry is to
* be created.
*/
} /* end for (tree = 0; principal_dn == ... */
free(filter);
if (found_entry == FALSE && principal_dn != NULL) {
/*
* if principal_dn is null then there is code further down to
* deal with setting standalone_principal_dn. Also note that
* this will set create_standalone_prinicipal true for
* non-mix-in entries which is okay if loading from a dump.
*/
create_standalone_prinicipal = TRUE;
standalone_principal_dn = strdup(principal_dn);
CHECK_NULL(standalone_principal_dn);
}
} /* end if (entry->mask & KADM5_LOAD */
/* time to generate the DN information with the help of
* containerdn, principalcontainerreference or
* realmcontainerdn information
*/
if (principal_dn == NULL && xargs.dn == NULL) { /* creation of standalone principal */
/* get the subtree information */
if (entry->princ->length == 2 && entry->princ->data[0].length == strlen("krbtgt") &&
strncmp(entry->princ->data[0].data, "krbtgt", entry->princ->data[0].length) == 0) {
/* if the principal is a inter-realm principal, always created in the realm container */
subtree = strdup(ldap_context->lrparams->realmdn);
} else if (xargs.containerdn) {
if ((st=checkattributevalue(ld, xargs.containerdn, NULL, NULL, NULL)) != 0) {
if (st == KRB5_KDB_NOENTRY || st == KRB5_KDB_CONSTRAINT_VIOLATION) {
int ost = st;
st = EINVAL;
snprintf(errbuf, sizeof(errbuf), _("'%s' not found: "),
xargs.containerdn);
prepend_err_str(context, errbuf, st, ost);
}
goto cleanup;
}
subtree = strdup(xargs.containerdn);
} else if (ldap_context->lrparams->containerref && strlen(ldap_context->lrparams->containerref) != 0) {
/*
* Here the subtree should be changed with
* principalcontainerreference attribute value
*/
subtree = strdup(ldap_context->lrparams->containerref);
} else {
subtree = strdup(ldap_context->lrparams->realmdn);
}
CHECK_NULL(subtree);
if (asprintf(&standalone_principal_dn, "krbprincipalname=%s,%s",
filtuser, subtree) < 0)
standalone_principal_dn = NULL;
CHECK_NULL(standalone_principal_dn);
/*
* free subtree when you are done using the subtree
* set the boolean create_standalone_prinicipal to TRUE
*/
create_standalone_prinicipal = TRUE;
free(subtree);
subtree = NULL;
}
/*
* If the DN information is presented by the user, time to
* validate the input to ensure that the DN falls under
* any of the subtrees
*/
if (xargs.dn_from_kbd == TRUE) {
/* make sure the DN falls in the subtree */
int dnlen=0, subtreelen=0;
char *dn=NULL;
krb5_boolean outofsubtree=TRUE;
if (xargs.dn != NULL) {
dn = xargs.dn;
} else if (xargs.linkdn != NULL) {
dn = xargs.linkdn;
} else if (standalone_principal_dn != NULL) {
/*
* Even though the standalone_principal_dn is constructed
* within this function, there is the containerdn input
* from the user that can become part of the it.
*/
dn = standalone_principal_dn;
}
/* Get the current subtree list if we haven't already done so. */
if (subtreelist == NULL) {
st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees);
if (st)
goto cleanup;
}
for (tre=0; tre<ntrees; ++tre) {
if (subtreelist[tre] == NULL || strlen(subtreelist[tre]) == 0) {
outofsubtree = FALSE;
break;
} else {
dnlen = strlen (dn);
subtreelen = strlen(subtreelist[tre]);
if ((dnlen >= subtreelen) && (strcasecmp((dn + dnlen - subtreelen), subtreelist[tre]) == 0)) {
outofsubtree = FALSE;
break;
}
}
}
if (outofsubtree == TRUE) {
st = EINVAL;
krb5_set_error_message(context, st,
_("DN is out of the realm subtree"));
goto cleanup;
}
/*
* dn value will be set either by dn, linkdn or the standalone_principal_dn
* In the first 2 cases, the dn should be existing and in the last case we
* are supposed to create the ldap object. so the below should not be
* executed for the last case.
*/
if (standalone_principal_dn == NULL) {
/*
* If the ldap object is missing, this results in an error.
*/
/*
* Search for krbprincipalname attribute here.
* This is to find if a kerberos identity is already present
* on the ldap object, in which case adding a kerberos identity
* on the ldap object should result in an error.
*/
char *attributes[]={"krbticketpolicyreference", "krbprincipalname", NULL};
ldap_msgfree(result);
result = NULL;
LDAP_SEARCH_1(dn, LDAP_SCOPE_BASE, 0, attributes, IGNORE_STATUS);
if (st == LDAP_SUCCESS) {
ent = ldap_first_entry(ld, result);
if (ent != NULL) {
if ((values=ldap_get_values(ld, ent, "krbticketpolicyreference")) != NULL) {
ldap_value_free(values);
}
if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) {
krb_identity_exists = TRUE;
ldap_value_free(values);
}
}
} else {
st = set_ldap_error(context, st, OP_SEARCH);
goto cleanup;
}
}
}
/*
* If xargs.dn is set then the request is to add a
* kerberos principal on a ldap object, but if
* there is one already on the ldap object this
* should result in an error.
*/
if (xargs.dn != NULL && krb_identity_exists == TRUE) {
st = EINVAL;
snprintf(errbuf, sizeof(errbuf),
_("ldap object is already kerberized"));
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
if (xargs.linkdn != NULL) {
/*
* link information can be changed using modprinc.
* However, link information can be changed only on the
* standalone kerberos principal objects. A standalone
* kerberos principal object is of type krbprincipal
* structural objectclass.
*
* NOTE: kerberos principals on an ldap object can't be
* linked to other ldap objects.
*/
if (optype == MODIFY_PRINCIPAL &&
kerberos_principal_object_type != KDB_STANDALONE_PRINCIPAL_OBJECT) {
st = EINVAL;
snprintf(errbuf, sizeof(errbuf),
_("link information can not be set/updated as the "
"kerberos principal belongs to an ldap object"));
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
/*
* Check the link information. If there is already a link
* existing then this operation is not allowed.
*/
{
char **linkdns=NULL;
int j=0;
if ((st=krb5_get_linkdn(context, entry, &linkdns)) != 0) {
snprintf(errbuf, sizeof(errbuf),
_("Failed getting object references"));
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
if (linkdns != NULL) {
st = EINVAL;
snprintf(errbuf, sizeof(errbuf),
_("kerberos principal is already linked to a ldap "
"object"));
krb5_set_error_message(context, st, "%s", errbuf);
for (j=0; linkdns[j] != NULL; ++j)
free (linkdns[j]);
free (linkdns);
goto cleanup;
}
}
establish_links = TRUE;
}
if (entry->mask & KADM5_LAST_SUCCESS) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->last_success)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastSuccessfulAuth", LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
if (entry->mask & KADM5_LAST_FAILED) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->last_failed)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastFailedAuth", LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free(strval[0]);
}
if (entry->mask & KADM5_FAIL_AUTH_COUNT) {
krb5_kvno fail_auth_count;
fail_auth_count = entry->fail_auth_count;
if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT)
fail_auth_count++;
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_REPLACE,
fail_auth_count);
if (st != 0)
goto cleanup;
} else if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) {
int attr_mask = 0;
krb5_boolean has_fail_count;
/* Check if the krbLoginFailedCount attribute exists. (Through
* krb5 1.8.1, it wasn't set in new entries.) */
st = krb5_get_attributes_mask(context, entry, &attr_mask);
if (st != 0)
goto cleanup;
has_fail_count = ((attr_mask & KDB_FAIL_AUTH_COUNT_ATTR) != 0);
/*
* If the client library and server supports RFC 4525,
* then use it to increment by one the value of the
* krbLoginFailedCount attribute. Otherwise, assert the
* (provided) old value by deleting it before adding.
*/
#ifdef LDAP_MOD_INCREMENT
if (ldap_server_handle->server_info->modify_increment &&
has_fail_count) {
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_INCREMENT, 1);
if (st != 0)
goto cleanup;
} else {
#endif /* LDAP_MOD_INCREMENT */
if (has_fail_count) {
st = krb5_add_int_mem_ldap_mod(&mods,
"krbLoginFailedCount",
LDAP_MOD_DELETE,
entry->fail_auth_count);
if (st != 0)
goto cleanup;
}
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_ADD,
entry->fail_auth_count + 1);
if (st != 0)
goto cleanup;
#ifdef LDAP_MOD_INCREMENT
}
#endif
} else if (optype == ADD_PRINCIPAL) {
/* Initialize krbLoginFailedCount in new entries to help avoid a
* race during the first failed login. */
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_ADD, 0);
}
if (entry->mask & KADM5_MAX_LIFE) {
if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxticketlife", LDAP_MOD_REPLACE, entry->max_life)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_MAX_RLIFE) {
if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxrenewableage", LDAP_MOD_REPLACE,
entry->max_renewable_life)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_ATTRIBUTES) {
if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbticketflags", LDAP_MOD_REPLACE,
entry->attributes)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_PRINCIPAL) {
memset(strval, 0, sizeof(strval));
strval[0] = user;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalname", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_PRINC_EXPIRE_TIME) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->expiration)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalexpiration", LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
if (entry->mask & KADM5_PW_EXPIRATION) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration",
LDAP_MOD_REPLACE,
strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
if (entry->mask & KADM5_POLICY) {
memset(&princ_ent, 0, sizeof(princ_ent));
for (tl_data=entry->tl_data; tl_data; tl_data=tl_data->tl_data_next) {
if (tl_data->tl_data_type == KRB5_TL_KADM_DATA) {
if ((st = krb5_lookup_tl_kadm_data(tl_data, &princ_ent)) != 0) {
goto cleanup;
}
break;
}
}
if (princ_ent.aux_attributes & KADM5_POLICY) {
memset(strval, 0, sizeof(strval));
if ((st = krb5_ldap_name_to_policydn (context, princ_ent.policy, &polname)) != 0)
goto cleanup;
strval[0] = polname;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
} else {
st = EINVAL;
krb5_set_error_message(context, st, "Password policy value null");
goto cleanup;
}
} else if (entry->mask & KADM5_LOAD && found_entry == TRUE) {
/*
* a load is special in that existing entries must have attrs that
* removed.
*/
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, NULL)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_POLICY_CLR) {
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_DELETE, NULL)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_KEY_DATA || entry->mask & KADM5_KVNO) {
krb5_kvno mkvno;
if ((st=krb5_dbe_lookup_mkvno(context, entry, &mkvno)) != 0)
goto cleanup;
bersecretkey = krb5_encode_krbsecretkey (entry->key_data,
entry->n_key_data, mkvno);
if ((st=krb5_add_ber_mem_ldap_mod(&mods, "krbprincipalkey",
LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, bersecretkey)) != 0)
goto cleanup;
if (!(entry->mask & KADM5_PRINCIPAL)) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods,
"krbpasswordexpiration",
LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
/* Update last password change whenever a new key is set */
{
krb5_timestamp last_pw_changed;
if ((st=krb5_dbe_lookup_last_pwd_change(context, entry,
&last_pw_changed)) != 0)
goto cleanup;
memset(strval, 0, sizeof(strval));
if ((strval[0] = getstringtime(last_pw_changed)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastPwdChange",
LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
} /* Modify Key data ends here */
/* Set tl_data */
if (entry->tl_data != NULL) {
int count = 0;
struct berval **ber_tl_data = NULL;
krb5_tl_data *ptr;
krb5_timestamp unlock_time;
for (ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) {
if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE
#ifdef SECURID
|| ptr->tl_data_type == KRB5_TL_DB_ARGS
#endif
|| ptr->tl_data_type == KRB5_TL_KADM_DATA
|| ptr->tl_data_type == KDB_TL_USER_INFO
|| ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL
|| ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK)
continue;
count++;
}
if (count != 0) {
int j;
ber_tl_data = (struct berval **) calloc (count + 1,
sizeof (struct berval*));
if (ber_tl_data == NULL) {
st = ENOMEM;
goto cleanup;
}
for (j = 0, ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) {
/* Ignore tl_data that are stored in separate directory
* attributes */
if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE
#ifdef SECURID
|| ptr->tl_data_type == KRB5_TL_DB_ARGS
#endif
|| ptr->tl_data_type == KRB5_TL_KADM_DATA
|| ptr->tl_data_type == KDB_TL_USER_INFO
|| ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL
|| ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK)
continue;
if ((st = tl_data2berval (ptr, &ber_tl_data[j])) != 0)
break;
j++;
}
if (st == 0) {
ber_tl_data[count] = NULL;
st=krb5_add_ber_mem_ldap_mod(&mods, "krbExtraData",
LDAP_MOD_REPLACE |
LDAP_MOD_BVALUES, ber_tl_data);
}
for (j = 0; ber_tl_data[j] != NULL; j++) {
free(ber_tl_data[j]->bv_val);
free(ber_tl_data[j]);
}
free(ber_tl_data);
if (st != 0)
goto cleanup;
}
if ((st=krb5_dbe_lookup_last_admin_unlock(context, entry,
&unlock_time)) != 0)
goto cleanup;
if (unlock_time != 0) {
/* Update last admin unlock */
memset(strval, 0, sizeof(strval));
if ((strval[0] = getstringtime(unlock_time)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastAdminUnlock",
LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
}
/* Directory specific attribute */
if (xargs.tktpolicydn != NULL) {
int tmask=0;
if (strlen(xargs.tktpolicydn) != 0) {
st = checkattributevalue(ld, xargs.tktpolicydn, "objectclass", policyclass, &tmask);
CHECK_CLASS_VALIDITY(st, tmask, _("ticket policy object value: "));
strval[0] = xargs.tktpolicydn;
strval[1] = NULL;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
} else {
/* if xargs.tktpolicydn is a empty string, then delete
* already existing krbticketpolicyreference attr */
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_DELETE, NULL)) != 0)
goto cleanup;
}
}
if (establish_links == TRUE) {
memset(strval, 0, sizeof(strval));
strval[0] = xargs.linkdn;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbObjectReferences", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
}
/*
* in case mods is NULL then return
* not sure but can happen in a modprinc
* so no need to return an error
* addprinc will at least have the principal name
* and the keys passed in
*/
if (mods == NULL)
goto cleanup;
if (create_standalone_prinicipal == TRUE) {
memset(strval, 0, sizeof(strval));
strval[0] = "krbprincipal";
strval[1] = "krbprincipalaux";
strval[2] = "krbTicketPolicyAux";
if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0)
goto cleanup;
st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL);
if (st == LDAP_ALREADY_EXISTS && entry->mask & KADM5_LOAD) {
/* a load operation must replace an existing entry */
st = ldap_delete_ext_s(ld, standalone_principal_dn, NULL, NULL);
if (st != LDAP_SUCCESS) {
snprintf(errbuf, sizeof(errbuf),
_("Principal delete failed (trying to replace "
"entry): %s"), ldap_err2string(st));
st = translate_ldap_error (st, OP_ADD);
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
} else {
st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL);
}
}
if (st != LDAP_SUCCESS) {
snprintf(errbuf, sizeof(errbuf), _("Principal add failed: %s"),
ldap_err2string(st));
st = translate_ldap_error (st, OP_ADD);
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
} else {
/*
* Here existing ldap object is modified and can be related
* to any attribute, so always ensure that the ldap
* object is extended with all the kerberos related
* objectclasses so that there are no constraint
* violations.
*/
{
char *attrvalues[] = {"krbprincipalaux", "krbTicketPolicyAux", NULL};
int p, q, r=0, amask=0;
if ((st=checkattributevalue(ld, (xargs.dn) ? xargs.dn : principal_dn,
"objectclass", attrvalues, &amask)) != 0)
goto cleanup;
memset(strval, 0, sizeof(strval));
for (p=1, q=0; p<=2; p<<=1, ++q) {
if ((p & amask) == 0)
strval[r++] = attrvalues[q];
}
if (r != 0) {
if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0)
goto cleanup;
}
}
if (xargs.dn != NULL)
st=ldap_modify_ext_s(ld, xargs.dn, mods, NULL, NULL);
else
st = ldap_modify_ext_s(ld, principal_dn, mods, NULL, NULL);
if (st != LDAP_SUCCESS) {
snprintf(errbuf, sizeof(errbuf), _("User modification failed: %s"),
ldap_err2string(st));
st = translate_ldap_error (st, OP_MOD);
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT)
entry->fail_auth_count++;
}
cleanup:
if (user)
free(user);
if (filtuser)
free(filtuser);
free_xargs(xargs);
if (standalone_principal_dn)
free(standalone_principal_dn);
if (principal_dn)
free (principal_dn);
if (polname != NULL)
free(polname);
for (tre = 0; tre < ntrees; tre++)
free(subtreelist[tre]);
free(subtreelist);
if (subtree)
free (subtree);
if (bersecretkey) {
for (l=0; bersecretkey[l]; ++l) {
if (bersecretkey[l]->bv_val)
free (bersecretkey[l]->bv_val);
free (bersecretkey[l]);
}
free (bersecretkey);
}
if (keys)
free (keys);
ldap_mods_free(mods, 1);
ldap_osa_free_princ_ent(&princ_ent);
ldap_msgfree(result);
krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
return(st);
}
krb5_error_code
krb5_read_tkt_policy(krb5_context context, krb5_ldap_context *ldap_context,
krb5_db_entry *entries, char *policy)
{
krb5_error_code st=0;
int mask=0, omask=0;
int tkt_mask=(KDB_MAX_LIFE_ATTR | KDB_MAX_RLIFE_ATTR | KDB_TKT_FLAGS_ATTR);
krb5_ldap_policy_params *tktpoldnparam=NULL;
if ((st=krb5_get_attributes_mask(context, entries, &mask)) != 0)
goto cleanup;
if ((mask & tkt_mask) == tkt_mask)
goto cleanup;
if (policy != NULL) {
st = krb5_ldap_read_policy(context, policy, &tktpoldnparam, &omask);
if (st && st != KRB5_KDB_NOENTRY) {
prepend_err_str(context, _("Error reading ticket policy. "), st,
st);
goto cleanup;
}
st = 0; /* reset the return status */
}
if ((mask & KDB_MAX_LIFE_ATTR) == 0) {
if ((omask & KDB_MAX_LIFE_ATTR) == KDB_MAX_LIFE_ATTR)
entries->max_life = tktpoldnparam->maxtktlife;
else if (ldap_context->lrparams->max_life)
entries->max_life = ldap_context->lrparams->max_life;
}
if ((mask & KDB_MAX_RLIFE_ATTR) == 0) {
if ((omask & KDB_MAX_RLIFE_ATTR) == KDB_MAX_RLIFE_ATTR)
entries->max_renewable_life = tktpoldnparam->maxrenewlife;
else if (ldap_context->lrparams->max_renewable_life)
entries->max_renewable_life = ldap_context->lrparams->max_renewable_life;
}
if ((mask & KDB_TKT_FLAGS_ATTR) == 0) {
if ((omask & KDB_TKT_FLAGS_ATTR) == KDB_TKT_FLAGS_ATTR)
entries->attributes = tktpoldnparam->tktflags;
else if (ldap_context->lrparams->tktflags)
entries->attributes |= ldap_context->lrparams->tktflags;
}
krb5_ldap_free_policy(context, tktpoldnparam);
cleanup:
return st;
}
krb5_error_code
krb5_decode_krbsecretkey(krb5_context context, krb5_db_entry *entries,
struct berval **bvalues,
krb5_tl_data *userinfo_tl_data, krb5_kvno *mkvno)
{
char *user=NULL;
int i=0, j=0, noofkeys=0;
krb5_key_data *key_data=NULL, *tmp;
krb5_error_code st=0;
if ((st=krb5_unparse_name(context, entries->princ, &user)) != 0)
goto cleanup;
for (i=0; bvalues[i] != NULL; ++i) {
krb5_int16 n_kd;
krb5_key_data *kd;
krb5_data in;
if (bvalues[i]->bv_len == 0)
continue;
in.length = bvalues[i]->bv_len;
in.data = bvalues[i]->bv_val;
st = asn1_decode_sequence_of_keys (&in,
&kd,
&n_kd,
mkvno);
if (st != 0) {
const char *msg = error_message(st);
st = -1; /* Something more appropriate ? */
krb5_set_error_message(context, st, _("unable to decode stored "
"principal key data (%s)"),
msg);
goto cleanup;
}
noofkeys += n_kd;
tmp = key_data;
/* Allocate an extra key data to avoid allocating zero bytes. */
key_data = realloc(key_data, (noofkeys + 1) * sizeof (krb5_key_data));
if (key_data == NULL) {
key_data = tmp;
st = ENOMEM;
goto cleanup;
}
for (j = 0; j < n_kd; j++)
key_data[noofkeys - n_kd + j] = kd[j];
free (kd);
}
entries->n_key_data = noofkeys;
entries->key_data = key_data;
cleanup:
ldap_value_free_len(bvalues);
free (user);
return st;
}
static char *
getstringtime(krb5_timestamp epochtime)
{
struct tm tme;
char *strtime=NULL;
time_t posixtime = epochtime;
strtime = calloc (50, 1);
if (strtime == NULL)
return NULL;
if (gmtime_r(&posixtime, &tme) == NULL)
return NULL;
strftime(strtime, 50, "%Y%m%d%H%M%SZ", &tme);
return strtime;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2201_0 |
crossvul-cpp_data_bad_3654_0 | /*
* Copyright © 2008,2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped by GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct change_domains *cd)
{
uint32_t invalidate_domains = 0, flush_domains = 0;
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (obj->base.pending_write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
if (obj->base.write_domain &&
(((obj->base.write_domain != obj->base.pending_read_domains ||
obj->ring != ring)) ||
(obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
flush_domains |= obj->base.write_domain;
invalidate_domains |=
obj->base.pending_read_domains & ~obj->base.write_domain;
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
* write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that.
*/
if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->base.pending_write_domain = obj->base.write_domain;
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
int and;
struct hlist_head buckets[0];
};
static struct eb_objects *
eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
if (eb == NULL)
return eb;
eb->and = count - 1;
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
return NULL;
}
static void
eb_destroy(struct eb_objects *eb)
{
kfree(eb);
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
uint32_t target_offset;
int ret = -EINVAL;
/* we've already hold a reference to all valid objects */
target_obj = &eb_get_object(eb, reloc->target_handle)->base;
if (unlikely(target_obj == NULL))
return -ENOENT;
target_offset = to_intel_bo(target_obj)->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->write_domain,
target_obj->pending_write_domain);
return ret;
}
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
if (target_offset == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
return ret;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
return ret;
}
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && in_atomic())
return -EFAULT;
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
return ret;
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
iowrite32(reloc->delta, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
}
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
return 0;
}
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
for (i = 0; i < entry->relocation_count; i++) {
struct drm_i915_gem_relocation_entry reloc;
if (__copy_from_user_inatomic(&reloc,
user_relocs+i,
sizeof(reloc)))
return -EFAULT;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
if (ret)
return ret;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
&reloc.presumed_offset,
sizeof(reloc.presumed_offset)))
return -EFAULT;
}
return 0;
}
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
if (ret)
return ret;
}
return 0;
}
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the struct mutex lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
}
pagefault_enable();
return ret;
}
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
return ret;
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
if (obj->tiling_mode) {
ret = i915_gem_object_get_fence(obj, ring);
if (ret)
goto err_unpin;
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
i915_gem_object_pin_fence(obj);
} else {
ret = i915_gem_object_put_fence(obj);
if (ret)
goto err_unpin;
}
obj->pending_fenced_gpu_access = true;
}
}
entry->offset = obj->gtt_offset;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
return ret;
}
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
else
list_move_tail(&obj->exec_list, &ordered_objects);
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
}
list_splice(&ordered_objects, objects);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
*
* 1a. Unbind all objects that do not match the GTT constraints for
* the execbuffer (fenceable, mappable, alignment etc).
* 1b. Increment pin count for already bound objects.
* 2. Bind new objects.
* 3. Decrement pin count.
*
* This avoid unnecessary unbinding of later objects in order to makr
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
if (!obj->gtt_space)
continue;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space)
continue;
ret = pin_and_fence_object(obj, ring);
if (ret) {
int ret_ignore;
/* This can potentially raise a harmless
* -EINVAL if we failed to bind in the above
* call. It cannot raise -EINTR since we know
* that the bo is freshly bound and so will
* not need to be flushed or waited upon.
*/
ret_ignore = i915_gem_object_unbind(obj);
(void)ret_ignore;
WARN_ON(obj->gtt_space);
break;
}
}
/* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
/* ... and ensure ppgtt mapping exist if needed. */
if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, obj->cache_level);
obj->has_aliasing_ppgtt_mapping = 1;
}
}
if (ret != -ENOSPC || retry > 1)
return ret;
/* First attempt, just clear anything that is purgeable.
* Second attempt, clear the entire GTT.
*/
ret = i915_gem_evict_everything(ring->dev, retry == 0);
if (ret)
return ret;
retry++;
} while (1);
err:
list_for_each_entry_continue_reverse(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
}
return ret;
}
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
int *reloc_offset;
int i, total, ret;
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
total = 0;
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
reloc = drm_malloc_ab(total, sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
drm_free_large(reloc);
drm_free_large(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
}
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret) {
mutex_lock(&dev->struct_mutex);
goto err;
}
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
}
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects);
if (ret)
goto err;
list_for_each_entry(obj, objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}
/* Leave the user relocations as are, this is the painfully slow path,
* and we want to avoid the complication of dropping the lock whilst
* having buffers reserved in the aperture and so causing spurious
* ENOSPC for random operations.
*/
err:
drm_free_large(reloc);
drm_free_large(reloc_offset);
return ret;
}
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
return ret;
}
}
return 0;
}
static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Disable semaphores on SNB */
if (INTEL_INFO(dev)->gen == 6)
return 0;
return 1;
}
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
{
struct intel_ring_buffer *from = obj->ring;
u32 seqno;
int ret, idx;
if (from == NULL || to == from)
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
seqno = obj->last_rendering_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
if (seqno == from->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
}
seqno = request->seqno;
}
from->sync_seqno[idx] = seqno;
return to->sync_to(to, from, seqno - 1);
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
return 0;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct change_domains cd;
int ret;
memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
cd.flush_rings);
if (ret)
return ret;
}
if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
if (ret)
return ret;
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
return ret;
}
return 0;
}
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
return -EINVAL;
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
if (fault_in_pages_readable(ptr, length))
return -EFAULT;
}
return 0;
}
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
u32 seqno)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
u32 invalidate;
/*
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires.
*
* The sampler always gets flushed on i965 (sigh).
*/
invalidate = I915_GEM_DOMAIN_COMMAND;
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
i915_gem_next_request_seqno(ring);
kfree(request);
}
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
ret = intel_ring_begin(ring, 4 * 3);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
return 0;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4)
return -EINVAL;
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
(struct drm_clip_rect __user *)(uintptr_t)
args->cliprects_ptr,
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
}
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
BUG_ON(ring->sync_seqno[i]);
}
}
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
if (ret)
goto err;
}
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
kfree(cliprects);
return ret;
}
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}
exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec2_list);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3654_0 |
crossvul-cpp_data_good_2020_6 | /*
* in/out function for ltree and lquery
* Teodor Sigaev <teodor@stack.net>
* contrib/ltree/ltree_io.c
*/
#include "postgres.h"
#include <ctype.h>
#include "ltree.h"
#include "utils/memutils.h"
#include "crc32.h"
PG_FUNCTION_INFO_V1(ltree_in);
Datum ltree_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_out);
Datum ltree_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(lquery_in);
Datum lquery_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(lquery_out);
Datum lquery_out(PG_FUNCTION_ARGS);
#define UNCHAR ereport(ERROR, \
(errcode(ERRCODE_SYNTAX_ERROR), \
errmsg("syntax error at position %d", \
pos)));
typedef struct
{
char *start;
int len; /* length in bytes */
int flag;
int wlen; /* length in characters */
} nodeitem;
#define LTPRS_WAITNAME 0
#define LTPRS_WAITDELIM 1
Datum
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
char *ptr;
nodeitem *list,
*lptr;
int num = 0,
totallen = 0;
int state = LTPRS_WAITNAME;
ltree *result;
ltree_level *curlevel;
int charlen;
int pos = 0;
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (charlen == 1 && t_iseq(ptr, '.'))
num++;
ptr += charlen;
}
if (num + 1 > MaxAllocSize / sizeof(nodeitem))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of levels (%d) exceeds the maximum allowed (%d)",
num + 1, (int) (MaxAllocSize / sizeof(nodeitem)))));
list = lptr = (nodeitem *) palloc(sizeof(nodeitem) * (num + 1));
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (state == LTPRS_WAITNAME)
{
if (ISALNUM(ptr))
{
lptr->start = ptr;
lptr->wlen = 0;
state = LTPRS_WAITDELIM;
}
else
UNCHAR;
}
else if (state == LTPRS_WAITDELIM)
{
if (charlen == 1 && t_iseq(ptr, '.'))
{
lptr->len = ptr - lptr->start;
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
state = LTPRS_WAITNAME;
}
else if (!ISALNUM(ptr))
UNCHAR;
}
else
/* internal error */
elog(ERROR, "internal error in parser");
ptr += charlen;
lptr->wlen++;
pos++;
}
if (state == LTPRS_WAITDELIM)
{
lptr->len = ptr - lptr->start;
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
}
else if (!(state == LTPRS_WAITNAME && lptr == list))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
result = (ltree *) palloc0(LTREE_HDRSIZE + totallen);
SET_VARSIZE(result, LTREE_HDRSIZE + totallen);
result->numlevel = lptr - list;
curlevel = LTREE_FIRST(result);
lptr = list;
while (lptr - list < result->numlevel)
{
curlevel->len = (uint16) lptr->len;
memcpy(curlevel->name, lptr->start, lptr->len);
curlevel = LEVEL_NEXT(curlevel);
lptr++;
}
pfree(list);
PG_RETURN_POINTER(result);
}
Datum
ltree_out(PG_FUNCTION_ARGS)
{
ltree *in = PG_GETARG_LTREE(0);
char *buf,
*ptr;
int i;
ltree_level *curlevel;
ptr = buf = (char *) palloc(VARSIZE(in));
curlevel = LTREE_FIRST(in);
for (i = 0; i < in->numlevel; i++)
{
if (i != 0)
{
*ptr = '.';
ptr++;
}
memcpy(ptr, curlevel->name, curlevel->len);
ptr += curlevel->len;
curlevel = LEVEL_NEXT(curlevel);
}
*ptr = '\0';
PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(buf);
}
#define LQPRS_WAITLEVEL 0
#define LQPRS_WAITDELIM 1
#define LQPRS_WAITOPEN 2
#define LQPRS_WAITFNUM 3
#define LQPRS_WAITSNUM 4
#define LQPRS_WAITND 5
#define LQPRS_WAITCLOSE 6
#define LQPRS_WAITEND 7
#define LQPRS_WAITVAR 8
#define GETVAR(x) ( *((nodeitem**)LQL_FIRST(x)) )
#define ITEMSIZE MAXALIGN(LQL_HDRSIZE+sizeof(nodeitem*))
#define NEXTLEV(x) ( (lquery_level*)( ((char*)(x)) + ITEMSIZE) )
Datum
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
char *ptr;
int num = 0,
totallen = 0,
numOR = 0;
int state = LQPRS_WAITLEVEL;
lquery *result;
nodeitem *lptr = NULL;
lquery_level *cur,
*curqlevel,
*tmpql;
lquery_variant *lrptr = NULL;
bool hasnot = false;
bool wasbad = false;
int charlen;
int pos = 0;
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (charlen == 1)
{
if (t_iseq(ptr, '.'))
num++;
else if (t_iseq(ptr, '|'))
numOR++;
}
ptr += charlen;
}
num++;
if (num > MaxAllocSize / ITEMSIZE)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of levels (%d) exceeds the maximum allowed (%d)",
num, (int) (MaxAllocSize / ITEMSIZE))));
curqlevel = tmpql = (lquery_level *) palloc0(ITEMSIZE * num);
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (state == LQPRS_WAITLEVEL)
{
if (ISALNUM(ptr))
{
GETVAR(curqlevel) = lptr = (nodeitem *) palloc0(sizeof(nodeitem) * (numOR + 1));
lptr->start = ptr;
state = LQPRS_WAITDELIM;
curqlevel->numvar = 1;
}
else if (charlen == 1 && t_iseq(ptr, '!'))
{
GETVAR(curqlevel) = lptr = (nodeitem *) palloc0(sizeof(nodeitem) * (numOR + 1));
lptr->start = ptr + 1;
state = LQPRS_WAITDELIM;
curqlevel->numvar = 1;
curqlevel->flag |= LQL_NOT;
hasnot = true;
}
else if (charlen == 1 && t_iseq(ptr, '*'))
state = LQPRS_WAITOPEN;
else
UNCHAR;
}
else if (state == LQPRS_WAITVAR)
{
if (ISALNUM(ptr))
{
lptr++;
lptr->start = ptr;
state = LQPRS_WAITDELIM;
curqlevel->numvar++;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITDELIM)
{
if (charlen == 1 && t_iseq(ptr, '@'))
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_INCASE;
curqlevel->flag |= LVAR_INCASE;
}
else if (charlen == 1 && t_iseq(ptr, '*'))
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_ANYEND;
curqlevel->flag |= LVAR_ANYEND;
}
else if (charlen == 1 && t_iseq(ptr, '%'))
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_SUBLEXEME;
curqlevel->flag |= LVAR_SUBLEXEME;
}
else if (charlen == 1 && t_iseq(ptr, '|'))
{
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEME) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
((lptr->flag & LVAR_ANYEND) ? 1 : 0);
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
state = LQPRS_WAITVAR;
}
else if (charlen == 1 && t_iseq(ptr, '.'))
{
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEME) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
((lptr->flag & LVAR_ANYEND) ? 1 : 0);
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
}
else if (ISALNUM(ptr))
{
if (lptr->flag)
UNCHAR;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITOPEN)
{
if (charlen == 1 && t_iseq(ptr, '{'))
state = LQPRS_WAITFNUM;
else if (charlen == 1 && t_iseq(ptr, '.'))
{
curqlevel->low = 0;
curqlevel->high = 0xffff;
curqlevel = NEXTLEV(curqlevel);
state = LQPRS_WAITLEVEL;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITFNUM)
{
if (charlen == 1 && t_iseq(ptr, ','))
state = LQPRS_WAITSNUM;
else if (t_isdigit(ptr))
{
curqlevel->low = atoi(ptr);
state = LQPRS_WAITND;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITSNUM)
{
if (t_isdigit(ptr))
{
curqlevel->high = atoi(ptr);
state = LQPRS_WAITCLOSE;
}
else if (charlen == 1 && t_iseq(ptr, '}'))
{
curqlevel->high = 0xffff;
state = LQPRS_WAITEND;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITCLOSE)
{
if (charlen == 1 && t_iseq(ptr, '}'))
state = LQPRS_WAITEND;
else if (!t_isdigit(ptr))
UNCHAR;
}
else if (state == LQPRS_WAITND)
{
if (charlen == 1 && t_iseq(ptr, '}'))
{
curqlevel->high = curqlevel->low;
state = LQPRS_WAITEND;
}
else if (charlen == 1 && t_iseq(ptr, ','))
state = LQPRS_WAITSNUM;
else if (!t_isdigit(ptr))
UNCHAR;
}
else if (state == LQPRS_WAITEND)
{
if (charlen == 1 && t_iseq(ptr, '.'))
{
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
}
else
UNCHAR;
}
else
/* internal error */
elog(ERROR, "internal error in parser");
ptr += charlen;
if (state == LQPRS_WAITDELIM)
lptr->wlen++;
pos++;
}
if (state == LQPRS_WAITDELIM)
{
if (lptr->start == ptr)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEME) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
((lptr->flag & LVAR_ANYEND) ? 1 : 0);
if (lptr->len == 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
}
else if (state == LQPRS_WAITOPEN)
curqlevel->high = 0xffff;
else if (state != LQPRS_WAITEND)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
curqlevel = tmpql;
totallen = LQUERY_HDRSIZE;
while ((char *) curqlevel - (char *) tmpql < num * ITEMSIZE)
{
totallen += LQL_HDRSIZE;
if (curqlevel->numvar)
{
lptr = GETVAR(curqlevel);
while (lptr - GETVAR(curqlevel) < curqlevel->numvar)
{
totallen += MAXALIGN(LVAR_HDRSIZE + lptr->len);
lptr++;
}
}
else if (curqlevel->low > curqlevel->high)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Low limit(%d) is greater than upper(%d).",
curqlevel->low, curqlevel->high)));
curqlevel = NEXTLEV(curqlevel);
}
result = (lquery *) palloc0(totallen);
SET_VARSIZE(result, totallen);
result->numlevel = num;
result->firstgood = 0;
result->flag = 0;
if (hasnot)
result->flag |= LQUERY_HASNOT;
cur = LQUERY_FIRST(result);
curqlevel = tmpql;
while ((char *) curqlevel - (char *) tmpql < num * ITEMSIZE)
{
memcpy(cur, curqlevel, LQL_HDRSIZE);
cur->totallen = LQL_HDRSIZE;
if (curqlevel->numvar)
{
lrptr = LQL_FIRST(cur);
lptr = GETVAR(curqlevel);
while (lptr - GETVAR(curqlevel) < curqlevel->numvar)
{
cur->totallen += MAXALIGN(LVAR_HDRSIZE + lptr->len);
lrptr->len = lptr->len;
lrptr->flag = lptr->flag;
lrptr->val = ltree_crc32_sz(lptr->start, lptr->len);
memcpy(lrptr->name, lptr->start, lptr->len);
lptr++;
lrptr = LVAR_NEXT(lrptr);
}
pfree(GETVAR(curqlevel));
if (cur->numvar > 1 || cur->flag != 0)
wasbad = true;
else if (wasbad == false)
(result->firstgood)++;
}
else
wasbad = true;
curqlevel = NEXTLEV(curqlevel);
cur = LQL_NEXT(cur);
}
pfree(tmpql);
PG_RETURN_POINTER(result);
}
Datum
lquery_out(PG_FUNCTION_ARGS)
{
lquery *in = PG_GETARG_LQUERY(0);
char *buf,
*ptr;
int i,
j,
totallen = 1;
lquery_level *curqlevel;
lquery_variant *curtlevel;
curqlevel = LQUERY_FIRST(in);
for (i = 0; i < in->numlevel; i++)
{
totallen++;
if (curqlevel->numvar)
totallen += 1 + (curqlevel->numvar * 4) + curqlevel->totallen;
else
totallen += 2 * 11 + 4;
curqlevel = LQL_NEXT(curqlevel);
}
ptr = buf = (char *) palloc(totallen);
curqlevel = LQUERY_FIRST(in);
for (i = 0; i < in->numlevel; i++)
{
if (i != 0)
{
*ptr = '.';
ptr++;
}
if (curqlevel->numvar)
{
if (curqlevel->flag & LQL_NOT)
{
*ptr = '!';
ptr++;
}
curtlevel = LQL_FIRST(curqlevel);
for (j = 0; j < curqlevel->numvar; j++)
{
if (j != 0)
{
*ptr = '|';
ptr++;
}
memcpy(ptr, curtlevel->name, curtlevel->len);
ptr += curtlevel->len;
if ((curtlevel->flag & LVAR_SUBLEXEME))
{
*ptr = '%';
ptr++;
}
if ((curtlevel->flag & LVAR_INCASE))
{
*ptr = '@';
ptr++;
}
if ((curtlevel->flag & LVAR_ANYEND))
{
*ptr = '*';
ptr++;
}
curtlevel = LVAR_NEXT(curtlevel);
}
}
else
{
if (curqlevel->low == curqlevel->high)
{
sprintf(ptr, "*{%d}", curqlevel->low);
}
else if (curqlevel->low == 0)
{
if (curqlevel->high == 0xffff)
{
*ptr = '*';
*(ptr + 1) = '\0';
}
else
sprintf(ptr, "*{,%d}", curqlevel->high);
}
else if (curqlevel->high == 0xffff)
{
sprintf(ptr, "*{%d,}", curqlevel->low);
}
else
sprintf(ptr, "*{%d,%d}", curqlevel->low, curqlevel->high);
ptr = strchr(ptr, '\0');
}
curqlevel = LQL_NEXT(curqlevel);
}
*ptr = '\0';
PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(buf);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_6 |
crossvul-cpp_data_bad_5817_0 | /*
* Go2Webinar decoder
* Copyright (c) 2012 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Go2Webinar decoder
*/
#include <zlib.h>
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "internal.h"
#include "mjpeg.h"
enum ChunkType {
FRAME_INFO = 0xC8,
TILE_DATA,
CURSOR_POS,
CURSOR_SHAPE,
CHUNK_CC,
CHUNK_CD
};
enum Compression {
COMPR_EPIC_J_B = 2,
COMPR_KEMPF_J_B,
};
static const uint8_t luma_quant[64] = {
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
};
static const uint8_t chroma_quant[64] = {
9, 9, 12, 24, 50, 50, 50, 50,
9, 11, 13, 33, 50, 50, 50, 50,
12, 13, 28, 50, 50, 50, 50, 50,
24, 33, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
};
typedef struct JPGContext {
DSPContext dsp;
ScanTable scantable;
VLC dc_vlc[2], ac_vlc[2];
int prev_dc[3];
DECLARE_ALIGNED(16, int16_t, block)[6][64];
uint8_t *buf;
} JPGContext;
typedef struct G2MContext {
JPGContext jc;
int version;
int compression;
int width, height, bpp;
int tile_width, tile_height;
int tiles_x, tiles_y, tile_x, tile_y;
int got_header;
uint8_t *framebuf;
int framebuf_stride, old_width, old_height;
uint8_t *synth_tile, *jpeg_tile;
int tile_stride, old_tile_w, old_tile_h;
uint8_t *kempf_buf, *kempf_flags;
uint8_t *cursor;
int cursor_stride;
int cursor_fmt;
int cursor_w, cursor_h, cursor_x, cursor_y;
int cursor_hot_x, cursor_hot_y;
} G2MContext;
static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
const uint8_t *val_table, int nb_codes,
int is_ac)
{
uint8_t huff_size[256] = { 0 };
uint16_t huff_code[256];
uint16_t huff_sym[256];
int i;
ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
for (i = 0; i < 256; i++)
huff_sym[i] = i + 16 * is_ac;
if (is_ac)
huff_sym[0] = 16 * 256;
return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
huff_code, 2, 2, huff_sym, 2, 2, 0);
}
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
int ret;
ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc, 12, 0);
if (ret)
return ret;
ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc, 12, 0);
if (ret)
return ret;
ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance, 251, 1);
if (ret)
return ret;
ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance, 251, 1);
if (ret)
return ret;
ff_dsputil_init(&c->dsp, avctx);
ff_init_scantable(c->dsp.idct_permutation, &c->scantable,
ff_zigzag_direct);
return 0;
}
static av_cold void jpg_free_context(JPGContext *ctx)
{
int i;
for (i = 0; i < 2; i++) {
ff_free_vlc(&ctx->dc_vlc[i]);
ff_free_vlc(&ctx->ac_vlc[i]);
}
av_freep(&ctx->buf);
}
static void jpg_unescape(const uint8_t *src, int src_size,
uint8_t *dst, int *dst_size)
{
const uint8_t *src_end = src + src_size;
uint8_t *dst_start = dst;
while (src < src_end) {
uint8_t x = *src++;
*dst++ = x;
if (x == 0xFF && !*src)
src++;
}
*dst_size = dst - dst_start;
}
static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
int plane, int16_t *block)
{
int dc, val, pos;
const int is_chroma = !!plane;
const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
c->dsp.clear_block(block);
dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
if (dc < 0)
return AVERROR_INVALIDDATA;
if (dc)
dc = get_xbits(gb, dc);
dc = dc * qmat[0] + c->prev_dc[plane];
block[0] = dc;
c->prev_dc[plane] = dc;
pos = 0;
while (pos < 63) {
val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
if (val < 0)
return AVERROR_INVALIDDATA;
pos += val >> 4;
val &= 0xF;
if (pos > 63)
return val ? AVERROR_INVALIDDATA : 0;
if (val) {
int nbits = val;
val = get_xbits(gb, nbits);
val *= qmat[ff_zigzag_direct[pos]];
block[c->scantable.permutated[pos]] = val;
}
}
return 0;
}
static inline void yuv2rgb(uint8_t *out, int Y, int U, int V)
{
out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16));
out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
}
static int jpg_decode_data(JPGContext *c, int width, int height,
const uint8_t *src, int src_size,
uint8_t *dst, int dst_stride,
const uint8_t *mask, int mask_stride, int num_mbs,
int swapuv)
{
GetBitContext gb;
uint8_t *tmp;
int mb_w, mb_h, mb_x, mb_y, i, j;
int bx, by;
int unesc_size;
int ret;
tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!tmp)
return AVERROR(ENOMEM);
c->buf = tmp;
jpg_unescape(src, src_size, c->buf, &unesc_size);
memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&gb, c->buf, unesc_size * 8);
width = FFALIGN(width, 16);
mb_w = width >> 4;
mb_h = (height + 15) >> 4;
if (!num_mbs)
num_mbs = mb_w * mb_h;
for (i = 0; i < 3; i++)
c->prev_dc[i] = 1024;
bx = by = 0;
for (mb_y = 0; mb_y < mb_h; mb_y++) {
for (mb_x = 0; mb_x < mb_w; mb_x++) {
if (mask && !mask[mb_x]) {
bx += 16;
continue;
}
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; i++) {
if ((ret = jpg_decode_block(c, &gb, 0,
c->block[i + j * 2])) != 0)
return ret;
c->dsp.idct(c->block[i + j * 2]);
}
}
for (i = 1; i < 3; i++) {
if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
return ret;
c->dsp.idct(c->block[i + 3]);
}
for (j = 0; j < 16; j++) {
uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
for (i = 0; i < 16; i++) {
int Y, U, V;
Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
yuv2rgb(out + i * 3, Y, U, V);
}
}
if (!--num_mbs)
return 0;
bx += 16;
}
bx = 0;
by += 16;
if (mask)
mask += mask_stride;
}
return 0;
}
static void kempf_restore_buf(const uint8_t *src, int len,
uint8_t *dst, int stride,
const uint8_t *jpeg_tile, int tile_stride,
int width, int height,
const uint8_t *pal, int npal, int tidx)
{
GetBitContext gb;
int i, j, nb, col;
init_get_bits(&gb, src, len * 8);
if (npal <= 2) nb = 1;
else if (npal <= 4) nb = 2;
else if (npal <= 16) nb = 4;
else nb = 8;
for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
if (get_bits(&gb, 8))
continue;
for (i = 0; i < width; i++) {
col = get_bits(&gb, nb);
if (col != tidx)
memcpy(dst + i * 3, pal + col * 3, 3);
else
memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
}
}
}
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
const uint8_t *src, int src_size)
{
int width, height;
int hdr, zsize, npal, tidx = -1, ret;
int i, j;
const uint8_t *src_end = src + src_size;
uint8_t pal[768], transp[3];
uLongf dlen = (c->tile_width + 1) * c->tile_height;
int sub_type;
int nblocks, cblocks, bstride;
int bits, bitbuf, coded;
uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
tile_y * c->tile_height * c->framebuf_stride;
if (src_size < 2)
return AVERROR_INVALIDDATA;
width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
hdr = *src++;
sub_type = hdr >> 5;
if (sub_type == 0) {
int j;
memcpy(transp, src, 3);
src += 3;
for (j = 0; j < height; j++, dst += c->framebuf_stride)
for (i = 0; i < width; i++)
memcpy(dst + i * 3, transp, 3);
return 0;
} else if (sub_type == 1) {
return jpg_decode_data(&c->jc, width, height, src, src_end - src,
dst, c->framebuf_stride, NULL, 0, 0, 0);
}
if (sub_type != 2) {
memcpy(transp, src, 3);
src += 3;
}
npal = *src++ + 1;
memcpy(pal, src, npal * 3); src += npal * 3;
if (sub_type != 2) {
for (i = 0; i < npal; i++) {
if (!memcmp(pal + i * 3, transp, 3)) {
tidx = i;
break;
}
}
}
if (src_end - src < 2)
return 0;
zsize = (src[0] << 8) | src[1]; src += 2;
if (src_end - src < zsize + (sub_type != 2))
return AVERROR_INVALIDDATA;
ret = uncompress(c->kempf_buf, &dlen, src, zsize);
if (ret)
return AVERROR_INVALIDDATA;
src += zsize;
if (sub_type == 2) {
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
NULL, 0, width, height, pal, npal, tidx);
return 0;
}
nblocks = *src++ + 1;
cblocks = 0;
bstride = FFALIGN(width, 16) >> 4;
// blocks are coded LSB and we need normal bitreader for JPEG data
bits = 0;
for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
if (!bits) {
if (src >= src_end)
return AVERROR_INVALIDDATA;
bitbuf = *src++;
bits = 8;
}
coded = bitbuf & 1;
bits--;
bitbuf >>= 1;
cblocks += coded;
if (cblocks > nblocks)
return AVERROR_INVALIDDATA;
c->kempf_flags[j + i * bstride] = coded;
}
}
memset(c->jpeg_tile, 0, c->tile_stride * height);
jpg_decode_data(&c->jc, width, height, src, src_end - src,
c->jpeg_tile, c->tile_stride,
c->kempf_flags, bstride, nblocks, 0);
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
c->jpeg_tile, c->tile_stride,
width, height, pal, npal, tidx);
return 0;
}
static int g2m_init_buffers(G2MContext *c)
{
int aligned_height;
if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
c->framebuf_stride = FFALIGN(c->width * 3, 16);
aligned_height = FFALIGN(c->height, 16);
av_free(c->framebuf);
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
if (!c->framebuf)
return AVERROR(ENOMEM);
}
if (!c->synth_tile || !c->jpeg_tile ||
c->old_tile_w < c->tile_width ||
c->old_tile_h < c->tile_height) {
c->tile_stride = FFALIGN(c->tile_width * 3, 16);
aligned_height = FFALIGN(c->tile_height, 16);
av_free(c->synth_tile);
av_free(c->jpeg_tile);
av_free(c->kempf_buf);
av_free(c->kempf_flags);
c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height
+ FF_INPUT_BUFFER_PADDING_SIZE);
c->kempf_flags = av_mallocz( c->tile_width * aligned_height);
if (!c->synth_tile || !c->jpeg_tile ||
!c->kempf_buf || !c->kempf_flags)
return AVERROR(ENOMEM);
}
return 0;
}
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
GetByteContext *gb)
{
int i, j, k;
uint8_t *dst;
uint32_t bits;
uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
uint32_t cursor_hot_x, cursor_hot_y;
int cursor_fmt;
uint8_t *tmp;
cur_size = bytestream2_get_be32(gb);
cursor_w = bytestream2_get_byte(gb);
cursor_h = bytestream2_get_byte(gb);
cursor_hot_x = bytestream2_get_byte(gb);
cursor_hot_y = bytestream2_get_byte(gb);
cursor_fmt = bytestream2_get_byte(gb);
cursor_stride = FFALIGN(cursor_w, 32) * 4;
if (cursor_w < 1 || cursor_w > 256 ||
cursor_h < 1 || cursor_h > 256) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %dx%d\n",
cursor_w, cursor_h);
return AVERROR_INVALIDDATA;
}
if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %d,%d\n",
cursor_hot_x, cursor_hot_y);
cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
}
if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
c->cursor_w * c->cursor_h / 4 > cur_size) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d/%d\n",
cur_size, bytestream2_get_bytes_left(gb));
return AVERROR_INVALIDDATA;
}
if (cursor_fmt != 1 && cursor_fmt != 32) {
avpriv_report_missing_feature(avctx, "Cursor format %d",
cursor_fmt);
return AVERROR_PATCHWELCOME;
}
tmp = av_realloc(c->cursor, cursor_stride * cursor_h);
if (!tmp) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
return AVERROR(ENOMEM);
}
c->cursor = tmp;
c->cursor_w = cursor_w;
c->cursor_h = cursor_h;
c->cursor_hot_x = cursor_hot_x;
c->cursor_hot_y = cursor_hot_y;
c->cursor_fmt = cursor_fmt;
c->cursor_stride = cursor_stride;
dst = c->cursor;
switch (c->cursor_fmt) {
case 1: // old monochrome
for (j = 0; j < c->cursor_h; j++) {
for (i = 0; i < c->cursor_w; i += 32) {
bits = bytestream2_get_be32(gb);
for (k = 0; k < 32; k++) {
dst[0] = !!(bits & 0x80000000);
dst += 4;
bits <<= 1;
}
}
}
dst = c->cursor;
for (j = 0; j < c->cursor_h; j++) {
for (i = 0; i < c->cursor_w; i += 32) {
bits = bytestream2_get_be32(gb);
for (k = 0; k < 32; k++) {
int mask_bit = !!(bits & 0x80000000);
switch (dst[0] * 2 + mask_bit) {
case 0:
dst[0] = 0xFF; dst[1] = 0x00;
dst[2] = 0x00; dst[3] = 0x00;
break;
case 1:
dst[0] = 0xFF; dst[1] = 0xFF;
dst[2] = 0xFF; dst[3] = 0xFF;
break;
default:
dst[0] = 0x00; dst[1] = 0x00;
dst[2] = 0x00; dst[3] = 0x00;
}
dst += 4;
bits <<= 1;
}
}
}
break;
case 32: // full colour
/* skip monochrome version of the cursor and decode RGBA instead */
bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
for (j = 0; j < c->cursor_h; j++) {
for (i = 0; i < c->cursor_w; i++) {
int val = bytestream2_get_be32(gb);
*dst++ = val >> 0;
*dst++ = val >> 8;
*dst++ = val >> 16;
*dst++ = val >> 24;
}
}
break;
default:
return AVERROR_PATCHWELCOME;
}
return 0;
}
#define APPLY_ALPHA(src, new, alpha) \
src = (src * (256 - alpha) + new * alpha) >> 8
static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
{
int i, j;
int x, y, w, h;
const uint8_t *cursor;
if (!c->cursor)
return;
x = c->cursor_x - c->cursor_hot_x;
y = c->cursor_y - c->cursor_hot_y;
cursor = c->cursor;
w = c->cursor_w;
h = c->cursor_h;
if (x + w > c->width)
w = c->width - x;
if (y + h > c->height)
h = c->height - y;
if (x < 0) {
w += x;
cursor += -x * 4;
} else {
dst += x * 3;
}
if (y < 0) {
h += y;
cursor += -y * c->cursor_stride;
} else {
dst += y * stride;
}
if (w < 0 || h < 0)
return;
for (j = 0; j < h; j++) {
for (i = 0; i < w; i++) {
uint8_t alpha = cursor[i * 4];
APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
}
dst += stride;
cursor += c->cursor_stride;
}
}
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
int *got_picture_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
G2MContext *c = avctx->priv_data;
AVFrame *pic = data;
GetByteContext bc, tbc;
int magic;
int got_header = 0;
uint32_t chunk_size;
int chunk_type;
int i;
int ret;
if (buf_size < 12) {
av_log(avctx, AV_LOG_ERROR,
"Frame should have at least 12 bytes, got %d instead\n",
buf_size);
return AVERROR_INVALIDDATA;
}
bytestream2_init(&bc, buf, buf_size);
magic = bytestream2_get_be32(&bc);
if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
(magic & 0xF) < 2 || (magic & 0xF) > 4) {
av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
return AVERROR_INVALIDDATA;
}
if ((magic & 0xF) != 4) {
av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n");
return AVERROR(ENOSYS);
}
while (bytestream2_get_bytes_left(&bc) > 5) {
chunk_size = bytestream2_get_le32(&bc) - 1;
chunk_type = bytestream2_get_byte(&bc);
if (chunk_size > bytestream2_get_bytes_left(&bc)) {
av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n",
chunk_size, chunk_type);
break;
}
switch (chunk_type) {
case FRAME_INFO:
c->got_header = 0;
if (chunk_size < 21) {
av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n",
chunk_size);
break;
}
c->width = bytestream2_get_be32(&bc);
c->height = bytestream2_get_be32(&bc);
if (c->width < 16 || c->width > avctx->width ||
c->height < 16 || c->height > avctx->height) {
av_log(avctx, AV_LOG_ERROR,
"Invalid frame dimensions %dx%d\n",
c->width, c->height);
ret = AVERROR_INVALIDDATA;
goto header_fail;
}
if (c->width != avctx->width || c->height != avctx->height)
avcodec_set_dimensions(avctx, c->width, c->height);
c->compression = bytestream2_get_be32(&bc);
if (c->compression != 2 && c->compression != 3) {
av_log(avctx, AV_LOG_ERROR,
"Unknown compression method %d\n",
c->compression);
return AVERROR_PATCHWELCOME;
}
c->tile_width = bytestream2_get_be32(&bc);
c->tile_height = bytestream2_get_be32(&bc);
if (!c->tile_width || !c->tile_height) {
av_log(avctx, AV_LOG_ERROR,
"Invalid tile dimensions %dx%d\n",
c->tile_width, c->tile_height);
ret = AVERROR_INVALIDDATA;
goto header_fail;
}
c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width;
c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
c->bpp = bytestream2_get_byte(&bc);
chunk_size -= 21;
bytestream2_skip(&bc, chunk_size);
if (g2m_init_buffers(c)) {
ret = AVERROR(ENOMEM);
goto header_fail;
}
got_header = 1;
break;
case TILE_DATA:
if (!c->tiles_x || !c->tiles_y) {
av_log(avctx, AV_LOG_WARNING,
"No frame header - skipping tile\n");
bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc));
break;
}
if (chunk_size < 2) {
av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n",
chunk_size);
break;
}
c->tile_x = bytestream2_get_byte(&bc);
c->tile_y = bytestream2_get_byte(&bc);
if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
av_log(avctx, AV_LOG_ERROR,
"Invalid tile pos %d,%d (in %dx%d grid)\n",
c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
break;
}
chunk_size -= 2;
ret = 0;
switch (c->compression) {
case COMPR_EPIC_J_B:
av_log(avctx, AV_LOG_ERROR,
"ePIC j-b compression is not implemented yet\n");
return AVERROR(ENOSYS);
case COMPR_KEMPF_J_B:
ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
buf + bytestream2_tell(&bc),
chunk_size);
break;
}
if (ret && c->framebuf)
av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
c->tile_x, c->tile_y);
bytestream2_skip(&bc, chunk_size);
break;
case CURSOR_POS:
if (chunk_size < 5) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n",
chunk_size);
break;
}
c->cursor_x = bytestream2_get_be16(&bc);
c->cursor_y = bytestream2_get_be16(&bc);
bytestream2_skip(&bc, chunk_size - 4);
break;
case CURSOR_SHAPE:
if (chunk_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n",
chunk_size);
break;
}
bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
chunk_size - 4);
g2m_load_cursor(avctx, c, &tbc);
bytestream2_skip(&bc, chunk_size);
break;
case CHUNK_CC:
case CHUNK_CD:
bytestream2_skip(&bc, chunk_size);
break;
default:
av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n",
chunk_type);
bytestream2_skip(&bc, chunk_size);
}
}
if (got_header)
c->got_header = 1;
if (c->width && c->height && c->framebuf) {
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->key_frame = got_header;
pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
for (i = 0; i < avctx->height; i++)
memcpy(pic->data[0] + i * pic->linesize[0],
c->framebuf + i * c->framebuf_stride,
c->width * 3);
g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);
*got_picture_ptr = 1;
}
return buf_size;
header_fail:
c->width = c->height = 0;
c->tiles_x = c->tiles_y = 0;
return ret;
}
static av_cold int g2m_decode_init(AVCodecContext *avctx)
{
G2MContext * const c = avctx->priv_data;
int ret;
if ((ret = jpg_init(avctx, &c->jc)) != 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
jpg_free_context(&c->jc);
return AVERROR(ENOMEM);
}
avctx->pix_fmt = AV_PIX_FMT_RGB24;
return 0;
}
static av_cold int g2m_decode_end(AVCodecContext *avctx)
{
G2MContext * const c = avctx->priv_data;
jpg_free_context(&c->jc);
av_freep(&c->kempf_buf);
av_freep(&c->kempf_flags);
av_freep(&c->synth_tile);
av_freep(&c->jpeg_tile);
av_freep(&c->cursor);
av_freep(&c->framebuf);
return 0;
}
AVCodec ff_g2m_decoder = {
.name = "g2m",
.long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_G2M,
.priv_data_size = sizeof(G2MContext),
.init = g2m_decode_init,
.close = g2m_decode_end,
.decode = g2m_decode_frame,
.capabilities = CODEC_CAP_DR1,
};
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5817_0 |
crossvul-cpp_data_good_3663_0 | /*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
#include <stdio.h>
#include <string.h>
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
struct hblk ** result = (struct hblk **)
GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
GC_bool ignore_off_page,
GC_bool retry); /* from alloc.c */
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
word n_blocks;
ptr_t result;
GC_bool retry = FALSE;
/* Round up to a multiple of a granule. */
lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
if (GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
if (0 == h) {
GC_merge_unmapped();
h = GC_allochblk(lb, k, flags);
}
# endif
while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
h = GC_allochblk(lb, k, flags);
retry = TRUE;
}
if (h == 0) {
result = 0;
} else {
size_t total_bytes = n_blocks * HBLKSIZE;
if (n_blocks > 1) {
GC_large_allocd_bytes += total_bytes;
if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
GC_max_large_allocd_bytes = GC_large_allocd_bytes;
}
result = h -> hb_body;
}
return result;
}
/* Allocate a large block of size lb bytes. Clear if appropriate. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
ptr_t result = GC_alloc_large(lb, k, flags);
word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (0 == result) return 0;
if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
/* Clear the whole block, in case of GC_realloc call. */
BZERO(result, n_blocks * HBLKSIZE);
}
return result;
}
/* allocate lb bytes for an object of kind k. */
/* Should not be used to directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
if(SMALL_OBJ(lb)) {
struct obj_kind * kind = GC_obj_kinds + k;
size_t lg = GC_size_map[lb];
void ** opp = &(kind -> ok_freelist[lg]);
op = *opp;
if (EXPECT(0 == op, FALSE)) {
if (GC_size_map[lb] == 0) {
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
return(GC_generic_malloc_inner(lb, k));
}
if (kind -> ok_reclaim_list == 0) {
if (!GC_alloc_reclaim_list(kind)) goto out;
}
op = GC_allocobj(lg, k);
if (op == 0) goto out;
}
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
} else {
op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
GC_bytes_allocd += lb;
}
out:
return op;
}
/* Allocate a composite object of size n bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
word lb_adjusted;
void * op;
if (lb <= HBLKSIZE)
return(GC_generic_malloc_inner(lb, k));
lb_adjusted = ADD_SLOP(lb);
op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
GC_bytes_allocd += lb_adjusted;
return op;
}
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
DCL_LOCK_STATE;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
} else {
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
return((*GC_get_oom_fn())(lb));
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
LOCK();
result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
UNLOCK();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
return((*GC_get_oom_fn())(lb));
} else {
return(result);
}
}
/* Allocate lb bytes of atomic (pointerfree) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
{
void *op;
void ** opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = &(GC_aobjfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
*opp = obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return((void *) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
}
/* Allocate lb bytes of composite (pointerful) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc(size_t lb)
#else
GC_API void * GC_CALL GC_malloc(size_t lb)
#endif
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = (void **)&(GC_objfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return (GENERAL_MALLOC((word)lb, NORMAL));
}
GC_ASSERT(0 == obj_link(op)
|| ((word)obj_link(op)
<= (word)GC_greatest_plausible_heap_addr
&& (word)obj_link(op)
>= (word)GC_least_plausible_heap_addr));
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return op;
} else {
return(GENERAL_MALLOC(lb, NORMAL));
}
}
/* Allocate lb bytes of pointerful, traced, but not collectable data */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
lg = GC_size_map[lb];
opp = &(GC_uobjfreelist[lg]);
LOCK();
op = *opp;
if (EXPECT(0 != op, TRUE)) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit ws already set on free list. It will be */
/* cleared only temporarily during a collection, as a */
/* result of the normal free list mark bit clearing. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
/* For small objects, the free lists are completely marked. */
}
GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
hdr * hhdr;
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
if (0 == op) return(0);
GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
hhdr = HDR(op);
/* We don't need the lock here, since we have an undisguised */
/* pointer. We do need to hold the lock while we adjust */
/* mark bits. */
LOCK();
set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
GC_ASSERT(hhdr -> hb_n_marks == 0);
hhdr -> hb_n_marks = 1;
UNLOCK();
return((void *) op);
}
}
#ifdef REDIRECT_MALLOC
# ifndef MSWINCE
# include <errno.h>
# endif
/* Avoid unnecessary nested procedure calls here, by #defining some */
/* malloc replacements. Otherwise we end up saving a */
/* meaningless return address in the object. It also speeds things up, */
/* but it is admittedly quite ugly. */
# define GC_debug_malloc_replacement(lb) \
GC_debug_malloc(lb, GC_DBG_RA "unknown", 0)
void * malloc(size_t lb)
{
/* It might help to manually inline the GC_malloc call here. */
/* But any decent compiler should reduce the extra procedure call */
/* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
/*
* Thread initialisation can call malloc before
* we're ready for it.
* It's not clear that this is enough to help matters.
* The thread implementation may well call malloc at other
* inopportune times.
*/
if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
return((void *)REDIRECT_MALLOC(lb));
}
#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
STATIC ptr_t GC_libpthread_start = 0;
STATIC ptr_t GC_libpthread_end = 0;
STATIC ptr_t GC_libld_start = 0;
STATIC ptr_t GC_libld_end = 0;
STATIC void GC_init_lib_bounds(void)
{
if (GC_libpthread_start != 0) return;
GC_init(); /* if not called yet */
if (!GC_text_mapping("libpthread-",
&GC_libpthread_start, &GC_libpthread_end)) {
WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
/* This might still work with some versions of libpthread, */
/* so we don't abort. Perhaps we should. */
/* Generate message only once: */
GC_libpthread_start = (ptr_t)1;
}
if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
}
}
#endif /* GC_LINUX_THREADS */
#include <limits.h>
#ifdef SIZE_MAX
# define GC_SIZE_MAX SIZE_MAX
#else
# define GC_SIZE_MAX (~(size_t)0)
#endif
void * calloc(size_t n, size_t lb)
{
if (lb && n > GC_SIZE_MAX / lb)
return NULL;
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
/* libpthread allocated some memory that is only pointed to by */
/* mmapped thread stacks. Make sure it's not collectable. */
{
static GC_bool lib_bounds_set = FALSE;
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (!EXPECT(lib_bounds_set, TRUE)) {
GC_init_lib_bounds();
lib_bounds_set = TRUE;
}
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end))
return GC_malloc_uncollectable(n*lb);
/* The two ranges are actually usually adjacent, so there may */
/* be a way to speed this up. */
}
# endif
return((void *)REDIRECT_MALLOC(n*lb));
}
#ifndef strdup
char *strdup(const char *s)
{
size_t lb = strlen(s) + 1;
char *result = (char *)REDIRECT_MALLOC(lb);
if (result == 0) {
errno = ENOMEM;
return 0;
}
BCOPY(s, result, lb);
return result;
}
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it. */
/* This seems to be true on most Linux systems. */
#ifndef strndup
/* This is similar to strdup(). */
char *strndup(const char *str, size_t size)
{
char *copy;
size_t len = strlen(str);
if (len > size)
len = size;
copy = (char *)REDIRECT_MALLOC(len + 1);
if (copy == NULL) {
errno = ENOMEM;
return NULL;
}
BCOPY(str, copy, len);
copy[len] = '\0';
return copy;
}
#endif /* !strndup */
#undef GC_debug_malloc_replacement
#endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* In bytes */
size_t ngranules; /* sz in granules */
void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
# ifdef LOG_ALLOCS
GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no);
# endif
h = HBLKPTR(p);
hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
|| defined(MSWIN32))
/* For Solaris, we have to redirect malloc calls during */
/* initialization. For the others, this seems to happen */
/* implicitly. */
/* Don't try to deallocate that memory. */
if (0 == hhdr) return;
# endif
GC_ASSERT(GC_base(p) == p);
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
/* Its unnecessary to clear the mark bit. If the */
/* object is reallocated, it doesn't matter. O.w. the */
/* collector will do it, since it's on a free list. */
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
UNLOCK();
}
}
/* Explicitly deallocate an object p when we already hold lock. */
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
GC_INNER void GC_free_inner(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* bytes */
size_t ngranules; /* sz in granules */
void ** flh;
int knd;
struct obj_kind * ok;
h = HBLKPTR(p);
hhdr = HDR(h);
knd = hhdr -> hb_obj_kind;
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
}
}
#endif /* THREADS */
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif
#ifdef REDIRECT_FREE
void free(void * p)
{
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
{
/* Don't bother with initialization checks. If nothing */
/* has been initialized, the check fails, and that's safe, */
/* since we haven't allocated uncollectable objects either. */
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end)) {
GC_free(p);
return;
}
}
# endif
# ifndef IGNORE_FREE
REDIRECT_FREE(p);
# endif
}
#endif /* REDIRECT_FREE */
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3663_0 |
crossvul-cpp_data_good_5669_0 | /*
* linux/drivers/video/fbmem.c
*
* Copyright (C) 1994 Martin Schaller
*
* 2001 - Documented with DocBook
* - Brad Douglas <brad@neruo.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
#include <linux/init.h>
#include <linux/linux_logo.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/fb.h>
#include <asm/fb.h>
/*
* Frame buffer device initialization and setup routines
*/
#define FBPIXMAPSIZE (1024 * 8)
static DEFINE_MUTEX(registration_lock);
struct fb_info *registered_fb[FB_MAX] __read_mostly;
int num_registered_fb __read_mostly;
static struct fb_info *get_fb_info(unsigned int idx)
{
struct fb_info *fb_info;
if (idx >= FB_MAX)
return ERR_PTR(-ENODEV);
mutex_lock(®istration_lock);
fb_info = registered_fb[idx];
if (fb_info)
atomic_inc(&fb_info->count);
mutex_unlock(®istration_lock);
return fb_info;
}
static void put_fb_info(struct fb_info *fb_info)
{
if (!atomic_dec_and_test(&fb_info->count))
return;
if (fb_info->fbops->fb_destroy)
fb_info->fbops->fb_destroy(fb_info);
}
int lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
if (!info->fbops) {
mutex_unlock(&info->lock);
return 0;
}
return 1;
}
EXPORT_SYMBOL(lock_fb_info);
/*
* Helpers
*/
int fb_get_color_depth(struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix)
{
int depth = 0;
if (fix->visual == FB_VISUAL_MONO01 ||
fix->visual == FB_VISUAL_MONO10)
depth = 1;
else {
if (var->green.length == var->blue.length &&
var->green.length == var->red.length &&
var->green.offset == var->blue.offset &&
var->green.offset == var->red.offset)
depth = var->green.length;
else
depth = var->green.length + var->red.length +
var->blue.length;
}
return depth;
}
EXPORT_SYMBOL(fb_get_color_depth);
/*
* Data padding functions.
*/
void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height)
{
__fb_pad_aligned_buffer(dst, d_pitch, src, s_pitch, height);
}
EXPORT_SYMBOL(fb_pad_aligned_buffer);
void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, u32 height,
u32 shift_high, u32 shift_low, u32 mod)
{
u8 mask = (u8) (0xfff << shift_high), tmp;
int i, j;
for (i = height; i--; ) {
for (j = 0; j < idx; j++) {
tmp = dst[j];
tmp &= mask;
tmp |= *src >> shift_low;
dst[j] = tmp;
tmp = *src << shift_high;
dst[j+1] = tmp;
src++;
}
tmp = dst[idx];
tmp &= mask;
tmp |= *src >> shift_low;
dst[idx] = tmp;
if (shift_high < mod) {
tmp = *src << shift_high;
dst[idx+1] = tmp;
}
src++;
dst += d_pitch;
}
}
EXPORT_SYMBOL(fb_pad_unaligned_buffer);
/*
* we need to lock this section since fb_cursor
* may use fb_imageblit()
*/
char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size)
{
u32 align = buf->buf_align - 1, offset;
char *addr = buf->addr;
/* If IO mapped, we need to sync before access, no sharing of
* the pixmap is done
*/
if (buf->flags & FB_PIXMAP_IO) {
if (info->fbops->fb_sync && (buf->flags & FB_PIXMAP_SYNC))
info->fbops->fb_sync(info);
return addr;
}
/* See if we fit in the remaining pixmap space */
offset = buf->offset + align;
offset &= ~align;
if (offset + size > buf->size) {
/* We do not fit. In order to be able to re-use the buffer,
* we must ensure no asynchronous DMA'ing or whatever operation
* is in progress, we sync for that.
*/
if (info->fbops->fb_sync && (buf->flags & FB_PIXMAP_SYNC))
info->fbops->fb_sync(info);
offset = 0;
}
buf->offset = offset + size;
addr += offset;
return addr;
}
#ifdef CONFIG_LOGO
static inline unsigned safe_shift(unsigned d, int n)
{
return n < 0 ? d >> -n : d << n;
}
static void fb_set_logocmap(struct fb_info *info,
const struct linux_logo *logo)
{
struct fb_cmap palette_cmap;
u16 palette_green[16];
u16 palette_blue[16];
u16 palette_red[16];
int i, j, n;
const unsigned char *clut = logo->clut;
palette_cmap.start = 0;
palette_cmap.len = 16;
palette_cmap.red = palette_red;
palette_cmap.green = palette_green;
palette_cmap.blue = palette_blue;
palette_cmap.transp = NULL;
for (i = 0; i < logo->clutsize; i += n) {
n = logo->clutsize - i;
/* palette_cmap provides space for only 16 colors at once */
if (n > 16)
n = 16;
palette_cmap.start = 32 + i;
palette_cmap.len = n;
for (j = 0; j < n; ++j) {
palette_cmap.red[j] = clut[0] << 8 | clut[0];
palette_cmap.green[j] = clut[1] << 8 | clut[1];
palette_cmap.blue[j] = clut[2] << 8 | clut[2];
clut += 3;
}
fb_set_cmap(&palette_cmap, info);
}
}
static void fb_set_logo_truepalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
static const unsigned char mask[] = { 0,0x80,0xc0,0xe0,0xf0,0xf8,0xfc,0xfe,0xff };
unsigned char redmask, greenmask, bluemask;
int redshift, greenshift, blueshift;
int i;
const unsigned char *clut = logo->clut;
/*
* We have to create a temporary palette since console palette is only
* 16 colors long.
*/
/* Bug: Doesn't obey msb_right ... (who needs that?) */
redmask = mask[info->var.red.length < 8 ? info->var.red.length : 8];
greenmask = mask[info->var.green.length < 8 ? info->var.green.length : 8];
bluemask = mask[info->var.blue.length < 8 ? info->var.blue.length : 8];
redshift = info->var.red.offset - (8 - info->var.red.length);
greenshift = info->var.green.offset - (8 - info->var.green.length);
blueshift = info->var.blue.offset - (8 - info->var.blue.length);
for ( i = 0; i < logo->clutsize; i++) {
palette[i+32] = (safe_shift((clut[0] & redmask), redshift) |
safe_shift((clut[1] & greenmask), greenshift) |
safe_shift((clut[2] & bluemask), blueshift));
clut += 3;
}
}
static void fb_set_logo_directpalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
int redshift, greenshift, blueshift;
int i;
redshift = info->var.red.offset;
greenshift = info->var.green.offset;
blueshift = info->var.blue.offset;
for (i = 32; i < 32 + logo->clutsize; i++)
palette[i] = i << redshift | i << greenshift | i << blueshift;
}
static void fb_set_logo(struct fb_info *info,
const struct linux_logo *logo, u8 *dst,
int depth)
{
int i, j, k;
const u8 *src = logo->data;
u8 xor = (info->fix.visual == FB_VISUAL_MONO01) ? 0xff : 0;
u8 fg = 1, d;
switch (fb_get_color_depth(&info->var, &info->fix)) {
case 1:
fg = 1;
break;
case 2:
fg = 3;
break;
default:
fg = 7;
break;
}
if (info->fix.visual == FB_VISUAL_MONO01 ||
info->fix.visual == FB_VISUAL_MONO10)
fg = ~((u8) (0xfff << info->var.green.length));
switch (depth) {
case 4:
for (i = 0; i < logo->height; i++)
for (j = 0; j < logo->width; src++) {
*dst++ = *src >> 4;
j++;
if (j < logo->width) {
*dst++ = *src & 0x0f;
j++;
}
}
break;
case 1:
for (i = 0; i < logo->height; i++) {
for (j = 0; j < logo->width; src++) {
d = *src ^ xor;
for (k = 7; k >= 0; k--) {
*dst++ = ((d >> k) & 1) ? fg : 0;
j++;
}
}
}
break;
}
}
/*
* Three (3) kinds of logo maps exist. linux_logo_clut224 (>16 colors),
* linux_logo_vga16 (16 colors) and linux_logo_mono (2 colors). Depending on
* the visual format and color depth of the framebuffer, the DAC, the
* pseudo_palette, and the logo data will be adjusted accordingly.
*
* Case 1 - linux_logo_clut224:
* Color exceeds the number of console colors (16), thus we set the hardware DAC
* using fb_set_cmap() appropriately. The "needs_cmapreset" flag will be set.
*
* For visuals that require color info from the pseudo_palette, we also construct
* one for temporary use. The "needs_directpalette" or "needs_truepalette" flags
* will be set.
*
* Case 2 - linux_logo_vga16:
* The number of colors just matches the console colors, thus there is no need
* to set the DAC or the pseudo_palette. However, the bitmap is packed, ie,
* each byte contains color information for two pixels (upper and lower nibble).
* To be consistent with fb_imageblit() usage, we therefore separate the two
* nibbles into separate bytes. The "depth" flag will be set to 4.
*
* Case 3 - linux_logo_mono:
* This is similar with Case 2. Each byte contains information for 8 pixels.
* We isolate each bit and expand each into a byte. The "depth" flag will
* be set to 1.
*/
static struct logo_data {
int depth;
int needs_directpalette;
int needs_truepalette;
int needs_cmapreset;
const struct linux_logo *logo;
} fb_logo __read_mostly;
static void fb_rotate_logo_ud(const u8 *in, u8 *out, u32 width, u32 height)
{
u32 size = width * height, i;
out += size - 1;
for (i = size; i--; )
*out-- = *in++;
}
static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height)
{
int i, j, h = height - 1;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
out[height * j + h - i] = *in++;
}
static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height)
{
int i, j, w = width - 1;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
out[height * (w - j) + i] = *in++;
}
static void fb_rotate_logo(struct fb_info *info, u8 *dst,
struct fb_image *image, int rotate)
{
u32 tmp;
if (rotate == FB_ROTATE_UD) {
fb_rotate_logo_ud(image->data, dst, image->width,
image->height);
image->dx = info->var.xres - image->width - image->dx;
image->dy = info->var.yres - image->height - image->dy;
} else if (rotate == FB_ROTATE_CW) {
fb_rotate_logo_cw(image->data, dst, image->width,
image->height);
tmp = image->width;
image->width = image->height;
image->height = tmp;
tmp = image->dy;
image->dy = image->dx;
image->dx = info->var.xres - image->width - tmp;
} else if (rotate == FB_ROTATE_CCW) {
fb_rotate_logo_ccw(image->data, dst, image->width,
image->height);
tmp = image->width;
image->width = image->height;
image->height = tmp;
tmp = image->dx;
image->dx = image->dy;
image->dy = info->var.yres - image->height - tmp;
}
image->data = dst;
}
static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
int rotate, unsigned int num)
{
unsigned int x;
if (rotate == FB_ROTATE_UR) {
for (x = 0;
x < num && image->dx + image->width <= info->var.xres;
x++) {
info->fbops->fb_imageblit(info, image);
image->dx += image->width + 8;
}
} else if (rotate == FB_ROTATE_UD) {
for (x = 0; x < num && image->dx >= 0; x++) {
info->fbops->fb_imageblit(info, image);
image->dx -= image->width + 8;
}
} else if (rotate == FB_ROTATE_CW) {
for (x = 0;
x < num && image->dy + image->height <= info->var.yres;
x++) {
info->fbops->fb_imageblit(info, image);
image->dy += image->height + 8;
}
} else if (rotate == FB_ROTATE_CCW) {
for (x = 0; x < num && image->dy >= 0; x++) {
info->fbops->fb_imageblit(info, image);
image->dy -= image->height + 8;
}
}
}
static int fb_show_logo_line(struct fb_info *info, int rotate,
const struct linux_logo *logo, int y,
unsigned int n)
{
u32 *palette = NULL, *saved_pseudo_palette = NULL;
unsigned char *logo_new = NULL, *logo_rotate = NULL;
struct fb_image image;
/* Return if the frame buffer is not mapped or suspended */
if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_MODULE)
return 0;
image.depth = 8;
image.data = logo->data;
if (fb_logo.needs_cmapreset)
fb_set_logocmap(info, logo);
if (fb_logo.needs_truepalette ||
fb_logo.needs_directpalette) {
palette = kmalloc(256 * 4, GFP_KERNEL);
if (palette == NULL)
return 0;
if (fb_logo.needs_truepalette)
fb_set_logo_truepalette(info, logo, palette);
else
fb_set_logo_directpalette(info, logo, palette);
saved_pseudo_palette = info->pseudo_palette;
info->pseudo_palette = palette;
}
if (fb_logo.depth <= 4) {
logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
if (logo_new == NULL) {
kfree(palette);
if (saved_pseudo_palette)
info->pseudo_palette = saved_pseudo_palette;
return 0;
}
image.data = logo_new;
fb_set_logo(info, logo, logo_new, fb_logo.depth);
}
image.dx = 0;
image.dy = y;
image.width = logo->width;
image.height = logo->height;
if (rotate) {
logo_rotate = kmalloc(logo->width *
logo->height, GFP_KERNEL);
if (logo_rotate)
fb_rotate_logo(info, logo_rotate, &image, rotate);
}
fb_do_show_logo(info, &image, rotate, n);
kfree(palette);
if (saved_pseudo_palette != NULL)
info->pseudo_palette = saved_pseudo_palette;
kfree(logo_new);
kfree(logo_rotate);
return logo->height;
}
#ifdef CONFIG_FB_LOGO_EXTRA
#define FB_LOGO_EX_NUM_MAX 10
static struct logo_data_extra {
const struct linux_logo *logo;
unsigned int n;
} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
static unsigned int fb_logo_ex_num;
void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
{
if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
return;
fb_logo_ex[fb_logo_ex_num].logo = logo;
fb_logo_ex[fb_logo_ex_num].n = n;
fb_logo_ex_num++;
}
static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
unsigned int yres)
{
unsigned int i;
/* FIXME: logo_ex supports only truecolor fb. */
if (info->fix.visual != FB_VISUAL_TRUECOLOR)
fb_logo_ex_num = 0;
for (i = 0; i < fb_logo_ex_num; i++) {
if (fb_logo_ex[i].logo->type != fb_logo.logo->type) {
fb_logo_ex[i].logo = NULL;
continue;
}
height += fb_logo_ex[i].logo->height;
if (height > yres) {
height -= fb_logo_ex[i].logo->height;
fb_logo_ex_num = i;
break;
}
}
return height;
}
static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
{
unsigned int i;
for (i = 0; i < fb_logo_ex_num; i++)
y += fb_show_logo_line(info, rotate,
fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
return y;
}
#else /* !CONFIG_FB_LOGO_EXTRA */
static inline int fb_prepare_extra_logos(struct fb_info *info,
unsigned int height,
unsigned int yres)
{
return height;
}
static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
{
return y;
}
#endif /* CONFIG_FB_LOGO_EXTRA */
int fb_prepare_logo(struct fb_info *info, int rotate)
{
int depth = fb_get_color_depth(&info->var, &info->fix);
unsigned int yres;
memset(&fb_logo, 0, sizeof(struct logo_data));
if (info->flags & FBINFO_MISC_TILEBLITTING ||
info->flags & FBINFO_MODULE)
return 0;
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
depth = info->var.blue.length;
if (info->var.red.length < depth)
depth = info->var.red.length;
if (info->var.green.length < depth)
depth = info->var.green.length;
}
if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR && depth > 4) {
/* assume console colormap */
depth = 4;
}
/* Return if no suitable logo was found */
fb_logo.logo = fb_find_logo(depth);
if (!fb_logo.logo) {
return 0;
}
if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
yres = info->var.yres;
else
yres = info->var.xres;
if (fb_logo.logo->height > yres) {
fb_logo.logo = NULL;
return 0;
}
/* What depth we asked for might be different from what we get */
if (fb_logo.logo->type == LINUX_LOGO_CLUT224)
fb_logo.depth = 8;
else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
fb_logo.depth = 4;
else
fb_logo.depth = 1;
if (fb_logo.depth > 4 && depth > 4) {
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
fb_logo.needs_truepalette = 1;
break;
case FB_VISUAL_DIRECTCOLOR:
fb_logo.needs_directpalette = 1;
fb_logo.needs_cmapreset = 1;
break;
case FB_VISUAL_PSEUDOCOLOR:
fb_logo.needs_cmapreset = 1;
break;
}
}
return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
}
int fb_show_logo(struct fb_info *info, int rotate)
{
int y;
y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
num_online_cpus());
y = fb_show_extra_logos(info, y, rotate);
return y;
}
#else
int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
#endif /* CONFIG_LOGO */
static void *fb_seq_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(®istration_lock);
return (*pos < FB_MAX) ? pos : NULL;
}
static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (*pos < FB_MAX) ? pos : NULL;
}
static void fb_seq_stop(struct seq_file *m, void *v)
{
mutex_unlock(®istration_lock);
}
static int fb_seq_show(struct seq_file *m, void *v)
{
int i = *(loff_t *)v;
struct fb_info *fi = registered_fb[i];
if (fi)
seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
return 0;
}
static const struct seq_operations proc_fb_seq_ops = {
.start = fb_seq_start,
.next = fb_seq_next,
.stop = fb_seq_stop,
.show = fb_seq_show,
};
static int proc_fb_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_fb_seq_ops);
}
static const struct file_operations fb_proc_fops = {
.owner = THIS_MODULE,
.open = proc_fb_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* We hold a reference to the fb_info in file->private_data,
* but if the current registered fb has changed, we don't
* actually want to use it.
*
* So look up the fb_info using the inode minor number,
* and just verify it against the reference we have.
*/
static struct fb_info *file_fb_info(struct file *file)
{
struct inode *inode = file_inode(file);
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
if (info != file->private_data)
info = NULL;
return info;
}
static ssize_t
fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
struct fb_info *info = file_fb_info(file);
u8 *buffer, *dst;
u8 __iomem *src;
int c, cnt = 0, err = 0;
unsigned long total_size;
if (!info || ! info->screen_base)
return -ENODEV;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
if (info->fbops->fb_read)
return info->fbops->fb_read(info, buf, count, ppos);
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
if (p >= total_size)
return 0;
if (count >= total_size)
count = total_size;
if (count + p > total_size)
count = total_size - p;
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
src = (u8 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
dst = buffer;
fb_memcpy_fromfb(dst, src, c);
dst += c;
src += c;
if (copy_to_user(buf, buffer, c)) {
err = -EFAULT;
break;
}
*ppos += c;
buf += c;
cnt += c;
count -= c;
}
kfree(buffer);
return (err) ? err : cnt;
}
static ssize_t
fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
struct fb_info *info = file_fb_info(file);
u8 *buffer, *src;
u8 __iomem *dst;
int c, cnt = 0, err = 0;
unsigned long total_size;
if (!info || !info->screen_base)
return -ENODEV;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
if (info->fbops->fb_write)
return info->fbops->fb_write(info, buf, count, ppos);
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
if (p > total_size)
return -EFBIG;
if (count > total_size) {
err = -EFBIG;
count = total_size;
}
if (count + p > total_size) {
if (!err)
err = -ENOSPC;
count = total_size - p;
}
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
dst = (u8 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
src = buffer;
if (copy_from_user(src, buf, c)) {
err = -EFAULT;
break;
}
fb_memcpy_tofb(dst, src, c);
dst += c;
src += c;
*ppos += c;
buf += c;
cnt += c;
count -= c;
}
kfree(buffer);
return (cnt) ? cnt : err;
}
int
fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
{
struct fb_fix_screeninfo *fix = &info->fix;
unsigned int yres = info->var.yres;
int err = 0;
if (var->yoffset > 0) {
if (var->vmode & FB_VMODE_YWRAP) {
if (!fix->ywrapstep || (var->yoffset % fix->ywrapstep))
err = -EINVAL;
else
yres = 0;
} else if (!fix->ypanstep || (var->yoffset % fix->ypanstep))
err = -EINVAL;
}
if (var->xoffset > 0 && (!fix->xpanstep ||
(var->xoffset % fix->xpanstep)))
err = -EINVAL;
if (err || !info->fbops->fb_pan_display ||
var->yoffset > info->var.yres_virtual - yres ||
var->xoffset > info->var.xres_virtual - info->var.xres)
return -EINVAL;
if ((err = info->fbops->fb_pan_display(var, info)))
return err;
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
u32 activate)
{
struct fb_event event;
struct fb_blit_caps caps, fbcaps;
int err = 0;
memset(&caps, 0, sizeof(caps));
memset(&fbcaps, 0, sizeof(fbcaps));
caps.flags = (activate & FB_ACTIVATE_ALL) ? 1 : 0;
event.info = info;
event.data = ∩︀
fb_notifier_call_chain(FB_EVENT_GET_REQ, &event);
info->fbops->fb_get_caps(info, &fbcaps, var);
if (((fbcaps.x ^ caps.x) & caps.x) ||
((fbcaps.y ^ caps.y) & caps.y) ||
(fbcaps.len < caps.len))
err = -EINVAL;
return err;
}
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
int flags = info->flags;
int ret = 0;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
fb_var_to_videomode(&mode1, var);
fb_var_to_videomode(&mode2, &info->var);
/* make sure we don't delete the videomode of current var */
ret = fb_mode_is_equal(&mode1, &mode2);
if (!ret) {
struct fb_event event;
event.info = info;
event.data = &mode1;
ret = fb_notifier_call_chain(FB_EVENT_MODE_DELETE, &event);
}
if (!ret)
fb_delete_videomode(&mode1, &info->modelist);
ret = (ret) ? -EINVAL : 0;
goto done;
}
if ((var->activate & FB_ACTIVATE_FORCE) ||
memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
u32 activate = var->activate;
/* When using FOURCC mode, make sure the red, green, blue and
* transp fields are set to 0.
*/
if ((info->fix.capabilities & FB_CAP_FOURCC) &&
var->grayscale > 1) {
if (var->red.offset || var->green.offset ||
var->blue.offset || var->transp.offset ||
var->red.length || var->green.length ||
var->blue.length || var->transp.length ||
var->red.msb_right || var->green.msb_right ||
var->blue.msb_right || var->transp.msb_right)
return -EINVAL;
}
if (!info->fbops->fb_check_var) {
*var = info->var;
goto done;
}
ret = info->fbops->fb_check_var(var, info);
if (ret)
goto done;
if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
if (info->fbops->fb_get_caps) {
ret = fb_check_caps(info, var, activate);
if (ret)
goto done;
}
old_var = info->var;
info->var = *var;
if (info->fbops->fb_set_par) {
ret = info->fbops->fb_set_par(info);
if (ret) {
info->var = old_var;
printk(KERN_WARNING "detected "
"fb_set_par error, "
"error code: %d\n", ret);
goto done;
}
}
fb_pan_display(info, &info->var);
fb_set_cmap(&info->cmap, info);
fb_var_to_videomode(&mode, &info->var);
if (info->modelist.prev && info->modelist.next &&
!list_empty(&info->modelist))
ret = fb_add_videomode(&mode, &info->modelist);
if (!ret && (flags & FBINFO_MISC_USEREVENT)) {
struct fb_event event;
int evnt = (activate & FB_ACTIVATE_ALL) ?
FB_EVENT_MODE_CHANGE_ALL :
FB_EVENT_MODE_CHANGE;
info->flags &= ~FBINFO_MISC_USEREVENT;
event.info = info;
event.data = &mode;
fb_notifier_call_chain(evnt, &event);
}
}
}
done:
return ret;
}
int
fb_blank(struct fb_info *info, int blank)
{
struct fb_event event;
int ret = -EINVAL, early_ret;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
event.info = info;
event.data = ␣
early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
if (info->fbops->fb_blank)
ret = info->fbops->fb_blank(blank, info);
if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
else {
/*
* if fb_blank is failed then revert effects of
* the early blank event.
*/
if (!early_ret)
fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
}
return ret;
}
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct fb_ops *fb;
struct fb_var_screeninfo var;
struct fb_fix_screeninfo fix;
struct fb_con2fbmap con2fb;
struct fb_cmap cmap_from;
struct fb_cmap_user cmap;
struct fb_event event;
void __user *argp = (void __user *)arg;
long ret = 0;
switch (cmd) {
case FBIOGET_VSCREENINFO:
if (!lock_fb_info(info))
return -ENODEV;
var = info->var;
unlock_fb_info(info);
ret = copy_to_user(argp, &var, sizeof(var)) ? -EFAULT : 0;
break;
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
unlock_fb_info(info);
if (!ret && copy_to_user(argp, &var, sizeof(var)))
ret = -EFAULT;
break;
case FBIOGET_FSCREENINFO:
if (!lock_fb_info(info))
return -ENODEV;
fix = info->fix;
unlock_fb_info(info);
ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
break;
case FBIOPUTCMAP:
if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
ret = fb_set_user_cmap(&cmap, info);
break;
case FBIOGETCMAP:
if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
cmap_from = info->cmap;
unlock_fb_info(info);
ret = fb_cmap_to_user(&cmap_from, &cmap);
break;
case FBIOPAN_DISPLAY:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
ret = fb_pan_display(info, &var);
console_unlock();
unlock_fb_info(info);
if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
break;
case FBIO_CURSOR:
ret = -EINVAL;
break;
case FBIOGET_CON2FBMAP:
if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
con2fb.framebuffer = -1;
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
event.info = info;
fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
unlock_fb_info(info);
ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
break;
case FBIOPUT_CON2FBMAP:
if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
return -EINVAL;
if (!registered_fb[con2fb.framebuffer])
request_module("fb%d", con2fb.framebuffer);
if (!registered_fb[con2fb.framebuffer]) {
ret = -EINVAL;
break;
}
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
console_unlock();
unlock_fb_info(info);
break;
case FBIOBLANK:
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_blank(info, arg);
info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
unlock_fb_info(info);
break;
default:
if (!lock_fb_info(info))
return -ENODEV;
fb = info->fbops;
if (fb->fb_ioctl)
ret = fb->fb_ioctl(info, cmd, arg);
else
ret = -ENOTTY;
unlock_fb_info(info);
}
return ret;
}
static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct fb_info *info = file_fb_info(file);
if (!info)
return -ENODEV;
return do_fb_ioctl(info, cmd, arg);
}
#ifdef CONFIG_COMPAT
struct fb_fix_screeninfo32 {
char id[16];
compat_caddr_t smem_start;
u32 smem_len;
u32 type;
u32 type_aux;
u32 visual;
u16 xpanstep;
u16 ypanstep;
u16 ywrapstep;
u32 line_length;
compat_caddr_t mmio_start;
u32 mmio_len;
u32 accel;
u16 reserved[3];
};
struct fb_cmap32 {
u32 start;
u32 len;
compat_caddr_t red;
compat_caddr_t green;
compat_caddr_t blue;
compat_caddr_t transp;
};
static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct fb_cmap_user __user *cmap;
struct fb_cmap32 __user *cmap32;
__u32 data;
int err;
cmap = compat_alloc_user_space(sizeof(*cmap));
cmap32 = compat_ptr(arg);
if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
return -EFAULT;
if (get_user(data, &cmap32->red) ||
put_user(compat_ptr(data), &cmap->red) ||
get_user(data, &cmap32->green) ||
put_user(compat_ptr(data), &cmap->green) ||
get_user(data, &cmap32->blue) ||
put_user(compat_ptr(data), &cmap->blue) ||
get_user(data, &cmap32->transp) ||
put_user(compat_ptr(data), &cmap->transp))
return -EFAULT;
err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
if (!err) {
if (copy_in_user(&cmap32->start,
&cmap->start,
2 * sizeof(__u32)))
err = -EFAULT;
}
return err;
}
static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
struct fb_fix_screeninfo32 __user *fix32)
{
__u32 data;
int err;
err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
data = (__u32) (unsigned long) fix->smem_start;
err |= put_user(data, &fix32->smem_start);
err |= put_user(fix->smem_len, &fix32->smem_len);
err |= put_user(fix->type, &fix32->type);
err |= put_user(fix->type_aux, &fix32->type_aux);
err |= put_user(fix->visual, &fix32->visual);
err |= put_user(fix->xpanstep, &fix32->xpanstep);
err |= put_user(fix->ypanstep, &fix32->ypanstep);
err |= put_user(fix->ywrapstep, &fix32->ywrapstep);
err |= put_user(fix->line_length, &fix32->line_length);
data = (__u32) (unsigned long) fix->mmio_start;
err |= put_user(data, &fix32->mmio_start);
err |= put_user(fix->mmio_len, &fix32->mmio_len);
err |= put_user(fix->accel, &fix32->accel);
err |= copy_to_user(fix32->reserved, fix->reserved,
sizeof(fix->reserved));
return err;
}
static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
mm_segment_t old_fs;
struct fb_fix_screeninfo fix;
struct fb_fix_screeninfo32 __user *fix32;
int err;
fix32 = compat_ptr(arg);
old_fs = get_fs();
set_fs(KERNEL_DS);
err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
set_fs(old_fs);
if (!err)
err = do_fscreeninfo_to_user(&fix, fix32);
return err;
}
static long fb_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fb_info *info = file_fb_info(file);
struct fb_ops *fb;
long ret = -ENOIOCTLCMD;
if (!info)
return -ENODEV;
fb = info->fbops;
switch(cmd) {
case FBIOGET_VSCREENINFO:
case FBIOPUT_VSCREENINFO:
case FBIOPAN_DISPLAY:
case FBIOGET_CON2FBMAP:
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
case FBIOBLANK:
ret = do_fb_ioctl(info, cmd, arg);
break;
case FBIOGET_FSCREENINFO:
ret = fb_get_fscreeninfo(info, cmd, arg);
break;
case FBIOGETCMAP:
case FBIOPUTCMAP:
ret = fb_getput_cmap(info, cmd, arg);
break;
default:
if (fb->fb_compat_ioctl)
ret = fb->fb_compat_ioctl(info, cmd, arg);
break;
}
return ret;
}
#endif
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
{
struct fb_info *info = file_fb_info(file);
struct fb_ops *fb;
unsigned long mmio_pgoff;
unsigned long start;
u32 len;
if (!info)
return -ENODEV;
fb = info->fbops;
if (!fb)
return -ENODEV;
mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
res = fb->fb_mmap(info, vma);
mutex_unlock(&info->mm_lock);
return res;
}
/*
* Ugh. This can be either the frame buffer mapping, or
* if pgoff points past it, the mmio mapping.
*/
start = info->fix.smem_start;
len = info->fix.smem_len;
mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
if (vma->vm_pgoff >= mmio_pgoff) {
vma->vm_pgoff -= mmio_pgoff;
start = info->fix.mmio_start;
len = info->fix.mmio_len;
}
mutex_unlock(&info->mm_lock);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
fb_pgprotect(file, vma, start);
return vm_iomap_memory(vma, start, len);
}
static int
fb_open(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
int fbidx = iminor(inode);
struct fb_info *info;
int res = 0;
info = get_fb_info(fbidx);
if (!info) {
request_module("fb%d", fbidx);
info = get_fb_info(fbidx);
if (!info)
return -ENODEV;
}
if (IS_ERR(info))
return PTR_ERR(info);
mutex_lock(&info->lock);
if (!try_module_get(info->fbops->owner)) {
res = -ENODEV;
goto out;
}
file->private_data = info;
if (info->fbops->fb_open) {
res = info->fbops->fb_open(info,1);
if (res)
module_put(info->fbops->owner);
}
#ifdef CONFIG_FB_DEFERRED_IO
if (info->fbdefio)
fb_deferred_io_open(info, inode, file);
#endif
out:
mutex_unlock(&info->lock);
if (res)
put_fb_info(info);
return res;
}
static int
fb_release(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
struct fb_info * const info = file->private_data;
mutex_lock(&info->lock);
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);
mutex_unlock(&info->lock);
put_fb_info(info);
return 0;
}
static const struct file_operations fb_fops = {
.owner = THIS_MODULE,
.read = fb_read,
.write = fb_write,
.unlocked_ioctl = fb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = fb_compat_ioctl,
#endif
.mmap = fb_mmap,
.open = fb_open,
.release = fb_release,
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
.get_unmapped_area = get_fb_unmapped_area,
#endif
#ifdef CONFIG_FB_DEFERRED_IO
.fsync = fb_deferred_io_fsync,
#endif
.llseek = default_llseek,
};
struct class *fb_class;
EXPORT_SYMBOL(fb_class);
static int fb_check_foreignness(struct fb_info *fi)
{
const bool foreign_endian = fi->flags & FBINFO_FOREIGN_ENDIAN;
fi->flags &= ~FBINFO_FOREIGN_ENDIAN;
#ifdef __BIG_ENDIAN
fi->flags |= foreign_endian ? 0 : FBINFO_BE_MATH;
#else
fi->flags |= foreign_endian ? FBINFO_BE_MATH : 0;
#endif /* __BIG_ENDIAN */
if (fi->flags & FBINFO_BE_MATH && !fb_be_math(fi)) {
pr_err("%s: enable CONFIG_FB_BIG_ENDIAN to "
"support this framebuffer\n", fi->fix.id);
return -ENOSYS;
} else if (!(fi->flags & FBINFO_BE_MATH) && fb_be_math(fi)) {
pr_err("%s: enable CONFIG_FB_LITTLE_ENDIAN to "
"support this framebuffer\n", fi->fix.id);
return -ENOSYS;
}
return 0;
}
static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
{
/* is the generic aperture base the same as the HW one */
if (gen->base == hw->base)
return true;
/* is the generic aperture base inside the hw base->hw base+size */
if (gen->base > hw->base && gen->base < hw->base + hw->size)
return true;
return false;
}
static bool fb_do_apertures_overlap(struct apertures_struct *gena,
struct apertures_struct *hwa)
{
int i, j;
if (!hwa || !gena)
return false;
for (i = 0; i < hwa->count; ++i) {
struct aperture *h = &hwa->ranges[i];
for (j = 0; j < gena->count; ++j) {
struct aperture *g = &gena->ranges[j];
printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
(unsigned long long)g->base,
(unsigned long long)g->size,
(unsigned long long)h->base,
(unsigned long long)h->size);
if (apertures_overlap(g, h))
return true;
}
}
return false;
}
static int do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
int i;
/* check all firmware fbs and kick off if the base addr overlaps */
for (i = 0 ; i < FB_MAX; i++) {
struct apertures_struct *gen_aper;
if (!registered_fb[i])
continue;
if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
continue;
gen_aper = registered_fb[i]->apertures;
if (fb_do_apertures_overlap(gen_aper, a) ||
(primary && gen_aper && gen_aper->count &&
gen_aper->ranges[0].base == VGA_FB_PHYS)) {
printk(KERN_INFO "fb: conflicting fb hw usage "
"%s vs %s - removing generic driver\n",
name, registered_fb[i]->fix.id);
do_unregister_framebuffer(registered_fb[i]);
}
}
}
static int do_register_framebuffer(struct fb_info *fb_info)
{
int i;
struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
fb_is_primary_device(fb_info));
if (num_registered_fb == FB_MAX)
return -ENXIO;
num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
if (!registered_fb[i])
break;
fb_info->node = i;
atomic_set(&fb_info->count, 1);
mutex_init(&fb_info->lock);
mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
if (IS_ERR(fb_info->dev)) {
/* Not fatal */
printk(KERN_WARNING "Unable to create device for framebuffer %d; errno = %ld\n", i, PTR_ERR(fb_info->dev));
fb_info->dev = NULL;
} else
fb_init_device(fb_info);
if (fb_info->pixmap.addr == NULL) {
fb_info->pixmap.addr = kmalloc(FBPIXMAPSIZE, GFP_KERNEL);
if (fb_info->pixmap.addr) {
fb_info->pixmap.size = FBPIXMAPSIZE;
fb_info->pixmap.buf_align = 1;
fb_info->pixmap.scan_align = 1;
fb_info->pixmap.access_align = 32;
fb_info->pixmap.flags = FB_PIXMAP_DEFAULT;
}
}
fb_info->pixmap.offset = 0;
if (!fb_info->pixmap.blit_x)
fb_info->pixmap.blit_x = ~(u32)0;
if (!fb_info->pixmap.blit_y)
fb_info->pixmap.blit_y = ~(u32)0;
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
fb_var_to_videomode(&mode, &fb_info->var);
fb_add_videomode(&mode, &fb_info->modelist);
registered_fb[i] = fb_info;
event.info = fb_info;
if (!lock_fb_info(fb_info))
return -ENODEV;
console_lock();
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
console_unlock();
unlock_fb_info(fb_info);
return 0;
}
static int do_unregister_framebuffer(struct fb_info *fb_info)
{
struct fb_event event;
int i, ret = 0;
i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
if (!lock_fb_info(fb_info))
return -ENODEV;
console_lock();
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
console_unlock();
unlock_fb_info(fb_info);
if (ret)
return -EINVAL;
unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
fb_destroy_modelist(&fb_info->modelist);
registered_fb[i] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
console_lock();
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
return 0;
}
int unlink_framebuffer(struct fb_info *fb_info)
{
int i;
i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
if (fb_info->dev) {
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
fb_info->dev = NULL;
}
return 0;
}
EXPORT_SYMBOL(unlink_framebuffer);
void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
mutex_lock(®istration_lock);
do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(®istration_lock);
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
*
* Registers a frame buffer device @fb_info.
*
* Returns negative errno on error, or zero for success.
*
*/
int
register_framebuffer(struct fb_info *fb_info)
{
int ret;
mutex_lock(®istration_lock);
ret = do_register_framebuffer(fb_info);
mutex_unlock(®istration_lock);
return ret;
}
/**
* unregister_framebuffer - releases a frame buffer device
* @fb_info: frame buffer info structure
*
* Unregisters a frame buffer device @fb_info.
*
* Returns negative errno on error, or zero for success.
*
* This function will also notify the framebuffer console
* to release the driver.
*
* This is meant to be called within a driver's module_exit()
* function. If this is called outside module_exit(), ensure
* that the driver implements fb_open() and fb_release() to
* check that no processes are using the device.
*/
int
unregister_framebuffer(struct fb_info *fb_info)
{
int ret;
mutex_lock(®istration_lock);
ret = do_unregister_framebuffer(fb_info);
mutex_unlock(®istration_lock);
return ret;
}
/**
* fb_set_suspend - low level driver signals suspend
* @info: framebuffer affected
* @state: 0 = resuming, !=0 = suspending
*
* This is meant to be used by low level drivers to
* signal suspend/resume to the core & clients.
* It must be called with the console semaphore held
*/
void fb_set_suspend(struct fb_info *info, int state)
{
struct fb_event event;
event.info = info;
if (state) {
fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
info->state = FBINFO_STATE_SUSPENDED;
} else {
info->state = FBINFO_STATE_RUNNING;
fb_notifier_call_chain(FB_EVENT_RESUME, &event);
}
}
/**
* fbmem_init - init frame buffer subsystem
*
* Initialize the frame buffer subsystem.
*
* NOTE: This function is _only_ to be called by drivers/char/mem.c.
*
*/
static int __init
fbmem_init(void)
{
proc_create("fb", 0, NULL, &fb_proc_fops);
if (register_chrdev(FB_MAJOR,"fb",&fb_fops))
printk("unable to get major %d for fb devs\n", FB_MAJOR);
fb_class = class_create(THIS_MODULE, "graphics");
if (IS_ERR(fb_class)) {
printk(KERN_WARNING "Unable to create fb class; errno = %ld\n", PTR_ERR(fb_class));
fb_class = NULL;
}
return 0;
}
#ifdef MODULE
module_init(fbmem_init);
static void __exit
fbmem_exit(void)
{
remove_proc_entry("fb", NULL);
class_destroy(fb_class);
unregister_chrdev(FB_MAJOR, "fb");
}
module_exit(fbmem_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Framebuffer base");
#else
subsys_initcall(fbmem_init);
#endif
int fb_new_modelist(struct fb_info *info)
{
struct fb_event event;
struct fb_var_screeninfo var = info->var;
struct list_head *pos, *n;
struct fb_modelist *modelist;
struct fb_videomode *m, mode;
int err = 1;
list_for_each_safe(pos, n, &info->modelist) {
modelist = list_entry(pos, struct fb_modelist, list);
m = &modelist->mode;
fb_videomode_to_var(&var, m);
var.activate = FB_ACTIVATE_TEST;
err = fb_set_var(info, &var);
fb_var_to_videomode(&mode, &var);
if (err || !fb_mode_is_equal(m, &mode)) {
list_del(pos);
kfree(pos);
}
}
err = 1;
if (!list_empty(&info->modelist)) {
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
}
return err;
}
static char *video_options[FB_MAX] __read_mostly;
static int ofonly __read_mostly;
/**
* fb_get_options - get kernel boot parameters
* @name: framebuffer name as it would appear in
* the boot parameter line
* (video=<name>:<options>)
* @option: the option will be stored here
*
* NOTE: Needed to maintain backwards compatibility
*/
int fb_get_options(char *name, char **option)
{
char *opt, *options = NULL;
int retval = 0;
int name_len = strlen(name), i;
if (name_len && ofonly && strncmp(name, "offb", 4))
retval = 1;
if (name_len && !retval) {
for (i = 0; i < FB_MAX; i++) {
if (video_options[i] == NULL)
continue;
if (!video_options[i][0])
continue;
opt = video_options[i];
if (!strncmp(name, opt, name_len) &&
opt[name_len] == ':')
options = opt + name_len + 1;
}
}
if (options && !strncmp(options, "off", 3))
retval = 1;
if (option)
*option = options;
return retval;
}
#ifndef MODULE
/**
* video_setup - process command line options
* @options: string of options
*
* Process command line options for frame buffer subsystem.
*
* NOTE: This function is a __setup and __init function.
* It only stores the options. Drivers have to call
* fb_get_options() as necessary.
*
* Returns zero.
*
*/
static int __init video_setup(char *options)
{
int i, global = 0;
if (!options || !*options)
global = 1;
if (!global && !strncmp(options, "ofonly", 6)) {
ofonly = 1;
global = 1;
}
if (!global && !strchr(options, ':')) {
fb_mode_option = options;
global = 1;
}
if (!global) {
for (i = 0; i < FB_MAX; i++) {
if (video_options[i] == NULL) {
video_options[i] = options;
break;
}
}
}
return 1;
}
__setup("video=", video_setup);
#endif
/*
* Visible symbols for modules
*/
EXPORT_SYMBOL(register_framebuffer);
EXPORT_SYMBOL(unregister_framebuffer);
EXPORT_SYMBOL(num_registered_fb);
EXPORT_SYMBOL(registered_fb);
EXPORT_SYMBOL(fb_show_logo);
EXPORT_SYMBOL(fb_set_var);
EXPORT_SYMBOL(fb_blank);
EXPORT_SYMBOL(fb_pan_display);
EXPORT_SYMBOL(fb_get_buffer_offset);
EXPORT_SYMBOL(fb_set_suspend);
EXPORT_SYMBOL(fb_get_options);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5669_0 |
crossvul-cpp_data_bad_3498_6 | /*
* Implement CPU time clocks for the POSIX clock interface.
*/
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
static int check_clock(const clockid_t which_clock)
{
int error = 0;
struct task_struct *p;
const pid_t pid = CPUCLOCK_PID(which_clock);
if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
return -EINVAL;
if (pid == 0)
return 0;
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
same_thread_group(p, current) : thread_group_leader(p))) {
error = -EINVAL;
}
read_unlock(&tasklist_lock);
return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret.cpu = timespec_to_cputime(tp);
}
return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
union cpu_time_count cpu,
struct timespec *tp)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
tp->tv_sec = div_long_long_rem(cpu.sched,
NSEC_PER_SEC, &tp->tv_nsec);
} else {
cputime_to_timespec(cpu.cpu, tp);
}
}
static inline int cpu_time_before(const clockid_t which_clock,
union cpu_time_count now,
union cpu_time_count then)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
return now.sched < then.sched;
} else {
return cputime_lt(now.cpu, then.cpu);
}
}
static inline void cpu_time_add(const clockid_t which_clock,
union cpu_time_count *acc,
union cpu_time_count val)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
acc->sched += val.sched;
} else {
acc->cpu = cputime_add(acc->cpu, val.cpu);
}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
union cpu_time_count a,
union cpu_time_count b)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
a.sched -= b.sched;
} else {
a.cpu = cputime_sub(a.cpu, b.cpu);
}
return a;
}
/*
* Divide and limit the result to res >= 1
*
* This is necessary to prevent signal delivery starvation, when the result of
* the division would be rounded down to 0.
*/
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
cputime_t res = cputime_div(time, div);
return max_t(cputime_t, res, 1);
}
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
static void bump_cpu_timer(struct k_itimer *timer,
union cpu_time_count now)
{
int i;
if (timer->it.cpu.incr.sched == 0)
return;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
unsigned long long delta, incr;
if (now.sched < timer->it.cpu.expires.sched)
return;
incr = timer->it.cpu.incr.sched;
delta = now.sched + incr - timer->it.cpu.expires.sched;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
if (delta < incr)
continue;
timer->it.cpu.expires.sched += incr;
timer->it_overrun += 1 << i;
delta -= incr;
}
} else {
cputime_t delta, incr;
if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
return;
incr = timer->it.cpu.incr.cpu;
delta = cputime_sub(cputime_add(now.cpu, incr),
timer->it.cpu.expires.cpu);
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
incr = cputime_add(incr, incr);
for (; i >= 0; incr = cputime_halve(incr), i--) {
if (cputime_lt(delta, incr))
continue;
timer->it.cpu.expires.cpu =
cputime_add(timer->it.cpu.expires.cpu, incr);
timer->it_overrun += 1 << i;
delta = cputime_sub(delta, incr);
}
}
}
static inline cputime_t prof_ticks(struct task_struct *p)
{
return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
return task_sched_runtime(p);
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
int error = check_clock(which_clock);
if (!error) {
tp->tv_sec = 0;
tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
* If sched_clock is using a cycle counter, we
* don't have any idea of its true resolution
* exported, but it is much more than 1s/HZ.
*/
tp->tv_nsec = 1;
}
}
return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
* in the call before failing with EPERM.
*/
int error = check_clock(which_clock);
if (error == 0) {
error = -EPERM;
}
return error;
}
/*
* Sample a per-thread clock for the given task.
*/
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
union cpu_time_count *cpu)
{
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
cpu->cpu = prof_ticks(p);
break;
case CPUCLOCK_VIRT:
cpu->cpu = virt_ticks(p);
break;
case CPUCLOCK_SCHED:
cpu->sched = sched_ns(p);
break;
}
return 0;
}
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
* Must be called with tasklist_lock held for reading, and p->sighand->siglock.
*/
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
struct task_struct *p,
union cpu_time_count *cpu)
{
struct task_struct *t = p;
switch (clock_idx) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
do {
cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_VIRT:
cpu->cpu = p->signal->utime;
do {
cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_SCHED:
cpu->sched = p->signal->sum_sched_runtime;
/* Add in each other live thread. */
while ((t = next_thread(t)) != p) {
cpu->sched += t->se.sum_exec_runtime;
}
cpu->sched += sched_ns(p);
break;
}
return 0;
}
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
*/
static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
union cpu_time_count *cpu)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
cpu);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
int error = -EINVAL;
union cpu_time_count rtn;
if (pid == 0) {
/*
* Special case constant value for our own clocks.
* We don't have to do any lookup to find ourselves.
*/
if (CPUCLOCK_PERTHREAD(which_clock)) {
/*
* Sampling just ourselves we can do with no locking.
*/
error = cpu_clock_sample(which_clock,
current, &rtn);
} else {
read_lock(&tasklist_lock);
error = cpu_clock_sample_group(which_clock,
current, &rtn);
read_unlock(&tasklist_lock);
}
} else {
/*
* Find the given PID, and validate that the caller
* should be able to see it.
*/
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p) {
if (CPUCLOCK_PERTHREAD(which_clock)) {
if (same_thread_group(p, current)) {
error = cpu_clock_sample(which_clock,
p, &rtn);
}
} else {
read_lock(&tasklist_lock);
if (thread_group_leader(p) && p->signal) {
error =
cpu_clock_sample_group(which_clock,
p, &rtn);
}
read_unlock(&tasklist_lock);
}
}
rcu_read_unlock();
}
if (error)
return error;
sample_to_timespec(which_clock, rtn, tp);
return 0;
}
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
* This is called from sys_timer_create with the new timer already locked.
*/
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
int ret = 0;
const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
struct task_struct *p;
if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
return -EINVAL;
INIT_LIST_HEAD(&new_timer->it.cpu.entry);
new_timer->it.cpu.incr.sched = 0;
new_timer->it.cpu.expires.sched = 0;
read_lock(&tasklist_lock);
if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
if (pid == 0) {
p = current;
} else {
p = find_task_by_vpid(pid);
if (p && !same_thread_group(p, current))
p = NULL;
}
} else {
if (pid == 0) {
p = current->group_leader;
} else {
p = find_task_by_vpid(pid);
if (p && !thread_group_leader(p))
p = NULL;
}
}
new_timer->it.cpu.task = p;
if (p) {
get_task_struct(p);
} else {
ret = -EINVAL;
}
read_unlock(&tasklist_lock);
return ret;
}
/*
* Clean up a CPU-clock timer that is about to be destroyed.
* This is called from timer deletion with the timer already locked.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
int ret = 0;
if (likely(p != NULL)) {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
* We raced with the reaping of the task.
* The deletion should have cleared us off the list.
*/
BUG_ON(!list_empty(&timer->it.cpu.entry));
} else {
spin_lock(&p->sighand->siglock);
if (timer->it.cpu.firing)
ret = TIMER_RETRY;
else
list_del(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
}
read_unlock(&tasklist_lock);
if (!ret)
put_task_struct(p);
}
return ret;
}
/*
* Clean out CPU timers still ticking when a thread exited. The task
* pointer is cleared, and the expiry time is replaced with the residual
* time for later timer_gettime calls to return.
* This must be called with the siglock held.
*/
static void cleanup_timers(struct list_head *head,
cputime_t utime, cputime_t stime,
unsigned long long sum_exec_runtime)
{
struct cpu_timer_list *timer, *next;
cputime_t ptime = cputime_add(utime, stime);
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {
timer->expires.cpu = cputime_zero;
} else {
timer->expires.cpu = cputime_sub(timer->expires.cpu,
ptime);
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {
timer->expires.cpu = cputime_zero;
} else {
timer->expires.cpu = cputime_sub(timer->expires.cpu,
utime);
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (timer->expires.sched < sum_exec_runtime) {
timer->expires.sched = 0;
} else {
timer->expires.sched -= sum_exec_runtime;
}
}
}
/*
* These are both called with the siglock held, when the current thread
* is being reaped. When the final (leader) thread in the group is reaped,
* posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
cleanup_timers(tsk->cpu_timers,
tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
cleanup_timers(tsk->signal->cpu_timers,
cputime_add(tsk->utime, tsk->signal->utime),
cputime_add(tsk->stime, tsk->signal->stime),
tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
}
/*
* Set the expiry times of all the threads in the process so one of them
* will go off before the process cumulative expiry total is reached.
*/
static void process_timer_rebalance(struct task_struct *p,
unsigned int clock_idx,
union cpu_time_count expires,
union cpu_time_count val)
{
cputime_t ticks, left;
unsigned long long ns, nsleft;
struct task_struct *t = p;
unsigned int nthreads = atomic_read(&p->signal->live);
if (!nthreads)
return;
switch (clock_idx) {
default:
BUG();
break;
case CPUCLOCK_PROF:
left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
if (likely(!(t->flags & PF_EXITING))) {
ticks = cputime_add(prof_ticks(t), left);
if (cputime_eq(t->it_prof_expires,
cputime_zero) ||
cputime_gt(t->it_prof_expires, ticks)) {
t->it_prof_expires = ticks;
}
}
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_VIRT:
left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
if (likely(!(t->flags & PF_EXITING))) {
ticks = cputime_add(virt_ticks(t), left);
if (cputime_eq(t->it_virt_expires,
cputime_zero) ||
cputime_gt(t->it_virt_expires, ticks)) {
t->it_virt_expires = ticks;
}
}
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_SCHED:
nsleft = expires.sched - val.sched;
do_div(nsleft, nthreads);
nsleft = max_t(unsigned long long, nsleft, 1);
do {
if (likely(!(t->flags & PF_EXITING))) {
ns = t->se.sum_exec_runtime + nsleft;
if (t->it_sched_expires == 0 ||
t->it_sched_expires > ns) {
t->it_sched_expires = ns;
}
}
t = next_thread(t);
} while (t != p);
break;
}
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
/*
* That's all for this thread or process.
* We leave our residual in expires to be reported.
*/
put_task_struct(timer->it.cpu.task);
timer->it.cpu.task = NULL;
timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
timer->it.cpu.expires,
now);
}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the tasklist_lock held
* for reading, and interrupts disabled.
*/
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
struct task_struct *p = timer->it.cpu.task;
struct list_head *head, *listpos;
struct cpu_timer_list *const nt = &timer->it.cpu;
struct cpu_timer_list *next;
unsigned long i;
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
BUG_ON(!irqs_disabled());
spin_lock(&p->sighand->siglock);
listpos = head;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
list_for_each_entry(next, head, entry) {
if (next->expires.sched > nt->expires.sched)
break;
listpos = &next->entry;
}
} else {
list_for_each_entry(next, head, entry) {
if (cputime_gt(next->expires.cpu, nt->expires.cpu))
break;
listpos = &next->entry;
}
}
list_add(&nt->entry, listpos);
if (listpos == head) {
/*
* We are the new earliest-expiring timer.
* If we are a thread timer, there can always
* be a process timer telling us to stop earlier.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
BUG();
case CPUCLOCK_PROF:
if (cputime_eq(p->it_prof_expires,
cputime_zero) ||
cputime_gt(p->it_prof_expires,
nt->expires.cpu))
p->it_prof_expires = nt->expires.cpu;
break;
case CPUCLOCK_VIRT:
if (cputime_eq(p->it_virt_expires,
cputime_zero) ||
cputime_gt(p->it_virt_expires,
nt->expires.cpu))
p->it_virt_expires = nt->expires.cpu;
break;
case CPUCLOCK_SCHED:
if (p->it_sched_expires == 0 ||
p->it_sched_expires > nt->expires.sched)
p->it_sched_expires = nt->expires.sched;
break;
}
} else {
/*
* For a process timer, we must balance
* all the live threads' expirations.
*/
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
BUG();
case CPUCLOCK_VIRT:
if (!cputime_eq(p->signal->it_virt_expires,
cputime_zero) &&
cputime_lt(p->signal->it_virt_expires,
timer->it.cpu.expires.cpu))
break;
goto rebalance;
case CPUCLOCK_PROF:
if (!cputime_eq(p->signal->it_prof_expires,
cputime_zero) &&
cputime_lt(p->signal->it_prof_expires,
timer->it.cpu.expires.cpu))
break;
i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
if (i != RLIM_INFINITY &&
i <= cputime_to_secs(timer->it.cpu.expires.cpu))
break;
goto rebalance;
case CPUCLOCK_SCHED:
rebalance:
process_timer_rebalance(
timer->it.cpu.task,
CPUCLOCK_WHICH(timer->it_clock),
timer->it.cpu.expires, now);
break;
}
}
}
spin_unlock(&p->sighand->siglock);
}
/*
* The timer is locked, fire it and arrange for its reload.
*/
static void cpu_timer_fire(struct k_itimer *timer)
{
if (unlikely(timer->sigq == NULL)) {
/*
* This a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
*/
wake_up_process(timer->it_process);
timer->it.cpu.expires.sched = 0;
} else if (timer->it.cpu.incr.sched == 0) {
/*
* One-shot timer. Clear it as soon as it's fired.
*/
posix_timer_event(timer, 0);
timer->it.cpu.expires.sched = 0;
} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
* reload the timer. But we need to keep it
* ticking in case the signal is deliverable next time.
*/
posix_cpu_timer_schedule(timer);
}
}
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
struct itimerspec *new, struct itimerspec *old)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count old_expires, new_expires, val;
int ret;
if (unlikely(p == NULL)) {
/*
* Timer refers to a dead task's clock.
*/
return -ESRCH;
}
new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
read_lock(&tasklist_lock);
/*
* We need the tasklist_lock to protect against reaping that
* clears p->signal. If p has just been reaped, we can no
* longer get any information about it at all.
*/
if (unlikely(p->signal == NULL)) {
read_unlock(&tasklist_lock);
put_task_struct(p);
timer->it.cpu.task = NULL;
return -ESRCH;
}
/*
* Disarm any old timer after extracting its expiry time.
*/
BUG_ON(!irqs_disabled());
ret = 0;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
timer->it.cpu.firing = -1;
ret = TIMER_RETRY;
} else
list_del_init(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
/*
* We need to sample the current value to convert the new
* value from to relative and absolute, and to convert the
* old value from absolute to relative. To set a process
* timer, we need a sample to balance the thread expiry
* times (in arm_timer). With an absolute time, we must
* check if it's already passed. In short, we need a sample.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &val);
} else {
cpu_clock_sample_group(timer->it_clock, p, &val);
}
if (old) {
if (old_expires.sched == 0) {
old->it_value.tv_sec = 0;
old->it_value.tv_nsec = 0;
} else {
/*
* Update the timer in case it has
* overrun already. If it has,
* we'll report it as having overrun
* and with the next reloaded timer
* already ticking, though we are
* swallowing that pending
* notification here to install the
* new setting.
*/
bump_cpu_timer(timer, val);
if (cpu_time_before(timer->it_clock, val,
timer->it.cpu.expires)) {
old_expires = cpu_time_sub(
timer->it_clock,
timer->it.cpu.expires, val);
sample_to_timespec(timer->it_clock,
old_expires,
&old->it_value);
} else {
old->it_value.tv_nsec = 1;
old->it_value.tv_sec = 0;
}
}
}
if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
read_unlock(&tasklist_lock);
goto out;
}
if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
cpu_time_add(timer->it_clock, &new_expires, val);
}
/*
* Install the new expiry time (or zero).
* For a timer with no notification action, we don't actually
* arm the timer (we'll just fake it for timer_gettime).
*/
timer->it.cpu.expires = new_expires;
if (new_expires.sched != 0 &&
(timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
cpu_time_before(timer->it_clock, val, new_expires)) {
arm_timer(timer, val);
}
read_unlock(&tasklist_lock);
/*
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
&new->it_interval);
/*
* This acts as a modification timestamp for the timer,
* so any automatic reload attempt will punt on seeing
* that we have reset the timer manually.
*/
timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timer->it_overrun_last = 0;
timer->it_overrun = -1;
if (new_expires.sched != 0 &&
(timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
!cpu_time_before(timer->it_clock, val, new_expires)) {
/*
* The designated time already passed, so we notify
* immediately, even if the thread never runs to
* accumulate more time on this clock.
*/
cpu_timer_fire(timer);
}
ret = 0;
out:
if (old) {
sample_to_timespec(timer->it_clock,
timer->it.cpu.incr, &old->it_interval);
}
return ret;
}
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
union cpu_time_count now;
struct task_struct *p = timer->it.cpu.task;
int clear_dead;
/*
* Easy part: convert the reload time.
*/
sample_to_timespec(timer->it_clock,
timer->it.cpu.incr, &itp->it_interval);
if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
return;
}
if (unlikely(p == NULL)) {
/*
* This task already died and the timer will never fire.
* In this case, expires is actually the dead value.
*/
dead:
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
return;
}
/*
* Sample the clock to take the difference with the expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
clear_dead = p->exit_state;
} else {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
* Call the timer disarmed, nothing else to do.
*/
put_task_struct(p);
timer->it.cpu.task = NULL;
timer->it.cpu.expires.sched = 0;
read_unlock(&tasklist_lock);
goto dead;
} else {
cpu_clock_sample_group(timer->it_clock, p, &now);
clear_dead = (unlikely(p->exit_state) &&
thread_group_empty(p));
}
read_unlock(&tasklist_lock);
}
if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
if (timer->it.cpu.incr.sched == 0 &&
cpu_time_before(timer->it_clock,
timer->it.cpu.expires, now)) {
/*
* Do-nothing timer expired and has no reload,
* so it's as if it was never set.
*/
timer->it.cpu.expires.sched = 0;
itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
return;
}
/*
* Account for any expirations and reloads that should
* have happened.
*/
bump_cpu_timer(timer, now);
}
if (unlikely(clear_dead)) {
/*
* We've noticed that the thread is dead, but
* not yet reaped. Take this opportunity to
* drop our task ref.
*/
clear_dead_task(timer, now);
goto dead;
}
if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
sample_to_timespec(timer->it_clock,
cpu_time_sub(timer->it_clock,
timer->it.cpu.expires, now),
&itp->it_value);
} else {
/*
* The timer should have expired already, but the firing
* hasn't taken place yet. Say it's just about to expire.
*/
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
}
}
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
* tsk->it_*_expires values to reflect the remaining thread CPU timers.
*/
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
int maxfire;
struct list_head *timers = tsk->cpu_timers;
struct signal_struct *const sig = tsk->signal;
maxfire = 20;
tsk->it_prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
tsk->it_prof_expires = t->expires.cpu;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
++timers;
maxfire = 20;
tsk->it_virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
tsk->it_virt_expires = t->expires.cpu;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
++timers;
maxfire = 20;
tsk->it_sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
tsk->it_sched_expires = t->expires.sched;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
/*
* Check for the special case thread timers.
*/
if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
if (hard != RLIM_INFINITY &&
tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
if (sig->rlim[RLIMIT_RTTIME].rlim_cur
< sig->rlim[RLIMIT_RTTIME].rlim_max) {
sig->rlim[RLIMIT_RTTIME].rlim_cur +=
USEC_PER_SEC;
}
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
}
/*
* Check for any per-thread CPU timers that have fired and move them
* off the tsk->*_timers list onto the firing list. Per-thread timers
* have already been taken off.
*/
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
int maxfire;
struct signal_struct *const sig = tsk->signal;
cputime_t utime, stime, ptime, virt_expires, prof_expires;
unsigned long long sum_sched_runtime, sched_expires;
struct task_struct *t;
struct list_head *timers = sig->cpu_timers;
/*
* Don't sample the current process CPU clocks if there are no timers.
*/
if (list_empty(&timers[CPUCLOCK_PROF]) &&
cputime_eq(sig->it_prof_expires, cputime_zero) &&
sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
list_empty(&timers[CPUCLOCK_VIRT]) &&
cputime_eq(sig->it_virt_expires, cputime_zero) &&
list_empty(&timers[CPUCLOCK_SCHED]))
return;
/*
* Collect the current process totals.
*/
utime = sig->utime;
stime = sig->stime;
sum_sched_runtime = sig->sum_sched_runtime;
t = tsk;
do {
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
sum_sched_runtime += t->se.sum_exec_runtime;
t = next_thread(t);
} while (t != tsk);
ptime = cputime_add(utime, stime);
maxfire = 20;
prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
prof_expires = tl->expires.cpu;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
++timers;
maxfire = 20;
virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
virt_expires = tl->expires.cpu;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
++timers;
maxfire = 20;
sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
sched_expires = tl->expires.sched;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
/*
* Check for the special case process timers.
*/
if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
if (cputime_ge(ptime, sig->it_prof_expires)) {
/* ITIMER_PROF fires and reloads. */
sig->it_prof_expires = sig->it_prof_incr;
if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
sig->it_prof_expires = cputime_add(
sig->it_prof_expires, ptime);
}
__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
}
if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
(cputime_eq(prof_expires, cputime_zero) ||
cputime_lt(sig->it_prof_expires, prof_expires))) {
prof_expires = sig->it_prof_expires;
}
}
if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
if (cputime_ge(utime, sig->it_virt_expires)) {
/* ITIMER_VIRTUAL fires and reloads. */
sig->it_virt_expires = sig->it_virt_incr;
if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
sig->it_virt_expires = cputime_add(
sig->it_virt_expires, utime);
}
__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
}
if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
(cputime_eq(virt_expires, cputime_zero) ||
cputime_lt(sig->it_virt_expires, virt_expires))) {
virt_expires = sig->it_virt_expires;
}
}
if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
unsigned long psecs = cputime_to_secs(ptime);
cputime_t x;
if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
if (sig->rlim[RLIMIT_CPU].rlim_cur
< sig->rlim[RLIMIT_CPU].rlim_max) {
sig->rlim[RLIMIT_CPU].rlim_cur++;
}
}
x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cputime_eq(prof_expires, cputime_zero) ||
cputime_lt(x, prof_expires)) {
prof_expires = x;
}
}
if (!cputime_eq(prof_expires, cputime_zero) ||
!cputime_eq(virt_expires, cputime_zero) ||
sched_expires != 0) {
/*
* Rebalance the threads' expiry times for the remaining
* process CPU timers.
*/
cputime_t prof_left, virt_left, ticks;
unsigned long long sched_left, sched;
const unsigned int nthreads = atomic_read(&sig->live);
if (!nthreads)
return;
prof_left = cputime_sub(prof_expires, utime);
prof_left = cputime_sub(prof_left, stime);
prof_left = cputime_div_non_zero(prof_left, nthreads);
virt_left = cputime_sub(virt_expires, utime);
virt_left = cputime_div_non_zero(virt_left, nthreads);
if (sched_expires) {
sched_left = sched_expires - sum_sched_runtime;
do_div(sched_left, nthreads);
sched_left = max_t(unsigned long long, sched_left, 1);
} else {
sched_left = 0;
}
t = tsk;
do {
if (unlikely(t->flags & PF_EXITING))
continue;
ticks = cputime_add(cputime_add(t->utime, t->stime),
prof_left);
if (!cputime_eq(prof_expires, cputime_zero) &&
(cputime_eq(t->it_prof_expires, cputime_zero) ||
cputime_gt(t->it_prof_expires, ticks))) {
t->it_prof_expires = ticks;
}
ticks = cputime_add(t->utime, virt_left);
if (!cputime_eq(virt_expires, cputime_zero) &&
(cputime_eq(t->it_virt_expires, cputime_zero) ||
cputime_gt(t->it_virt_expires, ticks))) {
t->it_virt_expires = ticks;
}
sched = t->se.sum_exec_runtime + sched_left;
if (sched_expires && (t->it_sched_expires == 0 ||
t->it_sched_expires > sched)) {
t->it_sched_expires = sched;
}
} while ((t = next_thread(t)) != tsk);
}
}
/*
* This is called from the signal code (via do_schedule_next_timer)
* when the last timer signal was delivered and we have to reload the timer.
*/
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count now;
if (unlikely(p == NULL))
/*
* The task was cleaned up already, no future firings.
*/
goto out;
/*
* Fetch the current sample and update the timer's expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
if (unlikely(p->exit_state)) {
clear_dead_task(timer, now);
goto out;
}
read_lock(&tasklist_lock); /* arm_timer needs it. */
} else {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
*/
put_task_struct(p);
timer->it.cpu.task = p = NULL;
timer->it.cpu.expires.sched = 0;
goto out_unlock;
} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
/*
* We've noticed that the thread is dead, but
* not yet reaped. Take this opportunity to
* drop our task ref.
*/
clear_dead_task(timer, now);
goto out_unlock;
}
cpu_clock_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
/* Leave the tasklist_lock locked for the call below. */
}
/*
* Now re-arm for the new expiry time.
*/
arm_timer(timer, now);
out_unlock:
read_unlock(&tasklist_lock);
out:
timer->it_overrun_last = timer->it_overrun;
timer->it_overrun = -1;
++timer->it_requeue_pending;
}
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
BUG_ON(!irqs_disabled());
#define UNEXPIRED(clock) \
(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
(tsk->it_sched_expires == 0 ||
tsk->se.sum_exec_runtime < tsk->it_sched_expires))
return;
#undef UNEXPIRED
/*
* Double-check with locks held.
*/
read_lock(&tasklist_lock);
if (likely(tsk->signal != NULL)) {
spin_lock(&tsk->sighand->siglock);
/*
* Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
* all the timers that are firing, and put them on the firing list.
*/
check_thread_timers(tsk, &firing);
check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
spin_unlock(&tsk->sighand->siglock);
}
read_unlock(&tasklist_lock);
/*
* Now that all the timers on our list have the firing flag,
* noone will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
int firing;
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
if (likely(firing >= 0)) {
cpu_timer_fire(timer);
}
spin_unlock(&timer->it_lock);
}
}
/*
* Set one of the process-wide special case CPU timers.
* The tasklist_lock and tsk->sighand->siglock must be held by the caller.
* The oldval argument is null for the RLIMIT_CPU timer, where *newval is
* absolute; non-null for ITIMER_*, where *newval is relative and we update
* it to be absolute, *oldval is absolute and we update it to be relative.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
{
union cpu_time_count now;
struct list_head *head;
BUG_ON(clock_idx == CPUCLOCK_SCHED);
cpu_clock_sample_group_locked(clock_idx, tsk, &now);
if (oldval) {
if (!cputime_eq(*oldval, cputime_zero)) {
if (cputime_le(*oldval, now.cpu)) {
/* Just about to fire. */
*oldval = jiffies_to_cputime(1);
} else {
*oldval = cputime_sub(*oldval, now.cpu);
}
}
if (cputime_eq(*newval, cputime_zero))
return;
*newval = cputime_add(*newval, now.cpu);
/*
* If the RLIMIT_CPU timer will expire before the
* ITIMER_PROF timer, we have nothing else to do.
*/
if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
< cputime_to_secs(*newval))
return;
}
/*
* Check whether there are any process timers already set to fire
* before this one. If so, we don't have anything more to do.
*/
head = &tsk->signal->cpu_timers[clock_idx];
if (list_empty(head) ||
cputime_ge(list_first_entry(head,
struct cpu_timer_list, entry)->expires.cpu,
*newval)) {
/*
* Rejigger each thread's expiry time so that one will
* notice before we hit the process-cumulative expiry time.
*/
union cpu_time_count expires = { .sched = 0 };
expires.cpu = *newval;
process_timer_rebalance(tsk, clock_idx, expires, now);
}
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct itimerspec *it)
{
struct k_itimer timer;
int error;
/*
* Set up a temporary timer and then wait for it to go off.
*/
memset(&timer, 0, sizeof timer);
spin_lock_init(&timer.it_lock);
timer.it_clock = which_clock;
timer.it_overrun = -1;
error = posix_cpu_timer_create(&timer);
timer.it_process = current;
if (!error) {
static struct itimerspec zero_it;
memset(it, 0, sizeof *it);
it->it_value = *rqtp;
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_set(&timer, flags, it, NULL);
if (error) {
spin_unlock_irq(&timer.it_lock);
return error;
}
while (!signal_pending(current)) {
if (timer.it.cpu.expires.sched == 0) {
/*
* Our timer fired and was reset.
*/
spin_unlock_irq(&timer.it_lock);
return 0;
}
/*
* Block until cpu_timer_fire (or a signal) wakes us.
*/
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&timer.it_lock);
schedule();
spin_lock_irq(&timer.it_lock);
}
/*
* We were interrupted by a signal.
*/
sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
posix_cpu_timer_set(&timer, 0, &zero_it, it);
spin_unlock_irq(&timer.it_lock);
if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
/*
* It actually did fire already.
*/
return 0;
}
error = -ERESTART_RESTARTBLOCK;
}
return error;
}
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
struct restart_block *restart_block =
¤t_thread_info()->restart_block;
struct itimerspec it;
int error;
/*
* Diagnose required errors first.
*/
if (CPUCLOCK_PERTHREAD(which_clock) &&
(CPUCLOCK_PID(which_clock) == 0 ||
CPUCLOCK_PID(which_clock) == current->pid))
return -EINVAL;
error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
if (error == -ERESTART_RESTARTBLOCK) {
if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
/*
* Report back to the user the time still remaining.
*/
if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->fn = posix_cpu_nsleep_restart;
restart_block->arg0 = which_clock;
restart_block->arg1 = (unsigned long) rmtp;
restart_block->arg2 = rqtp->tv_sec;
restart_block->arg3 = rqtp->tv_nsec;
}
return error;
}
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
clockid_t which_clock = restart_block->arg0;
struct timespec __user *rmtp;
struct timespec t;
struct itimerspec it;
int error;
rmtp = (struct timespec __user *) restart_block->arg1;
t.tv_sec = restart_block->arg2;
t.tv_nsec = restart_block->arg3;
restart_block->fn = do_no_restart_syscall;
error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
if (error == -ERESTART_RESTARTBLOCK) {
/*
* Report back to the user the time still remaining.
*/
if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->fn = posix_cpu_nsleep_restart;
restart_block->arg0 = which_clock;
restart_block->arg1 = (unsigned long) rmtp;
restart_block->arg2 = t.tv_sec;
restart_block->arg3 = t.tv_nsec;
}
return error;
}
#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = PROCESS_CLOCK;
return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp,
struct timespec __user *rmtp)
{
return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
struct k_clock process = {
.clock_getres = process_cpu_clock_getres,
.clock_get = process_cpu_clock_get,
.clock_set = do_posix_clock_nosettime,
.timer_create = process_cpu_timer_create,
.nsleep = process_cpu_nsleep,
.nsleep_restart = process_cpu_nsleep_restart,
};
struct k_clock thread = {
.clock_getres = thread_cpu_clock_getres,
.clock_get = thread_cpu_clock_get,
.clock_set = do_posix_clock_nosettime,
.timer_create = thread_cpu_timer_create,
.nsleep = thread_cpu_nsleep,
.nsleep_restart = thread_cpu_nsleep_restart,
};
register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
return 0;
}
__initcall(init_posix_cpu_timers);
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3498_6 |
crossvul-cpp_data_bad_3484_0 | /*
* pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
*
* Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
* PMC-Sierra Inc
*
* Copyright (C) 2008, 2009 PMC Sierra Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
* USA
*
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hdreg.h>
#include <linux/version.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <linux/libata.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsicam.h>
#include "pmcraid.h"
/*
* Module configuration parameters
*/
static unsigned int pmcraid_debug_log;
static unsigned int pmcraid_disable_aen;
static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
static unsigned int pmcraid_enable_msix;
/*
* Data structures to support multiple adapters by the LLD.
* pmcraid_adapter_count - count of configured adapters
*/
static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
/*
* Supporting user-level control interface through IOCTL commands.
* pmcraid_major - major number to use
* pmcraid_minor - minor number(s) to use
*/
static unsigned int pmcraid_major;
static struct class *pmcraid_class;
DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
* Module parameters
*/
MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(PMCRAID_DRIVER_VERSION);
module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(log_level,
"Enables firmware error code logging, default :1 high-severity"
" errors, 2: all errors including high-severity errors,"
" 0: disables logging");
module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(debug,
"Enable driver verbose message logging. Set 1 to enable."
"(default: 0)");
module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(disable_aen,
"Disable driver aen notifications to apps. Set 1 to disable."
"(default: 0)");
/* chip specific constants for PMC MaxRAID controllers (same for
* 0x5220 and 0x8010
*/
static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
{
.ioastatus = 0x0,
.ioarrin = 0x00040,
.mailbox = 0x7FC30,
.global_intr_mask = 0x00034,
.ioa_host_intr = 0x0009C,
.ioa_host_intr_clr = 0x000A0,
.ioa_host_msix_intr = 0x7FC40,
.ioa_host_mask = 0x7FC28,
.ioa_host_mask_clr = 0x7FC28,
.host_ioa_intr = 0x00020,
.host_ioa_intr_clr = 0x00020,
.transop_timeout = 300
}
};
/*
* PCI device ids supported by pmcraid driver
*/
static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
{}
};
MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
/**
* pmcraid_slave_alloc - Prepare for commands to a device
* @scsi_dev: scsi device struct
*
* This function is called by mid-layer prior to sending any command to the new
* device. Stores resource entry details of the device in scsi_device struct.
* Queuecommand uses the resource handle and other details to fill up IOARCB
* while sending commands to the device.
*
* Return value:
* 0 on success / -ENXIO if device does not exist
*/
static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *temp, *res = NULL;
struct pmcraid_instance *pinstance;
u8 target, bus, lun;
unsigned long lock_flags;
int rc = -ENXIO;
u16 fw_version;
pinstance = shost_priv(scsi_dev->host);
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
/* Driver exposes VSET and GSCSI resources only; all other device types
* are not exposed. Resource list is synchronized using resource lock
* so any traversal or modifications to the list should be done inside
* this lock
*/
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry(temp, &pinstance->used_res_q, queue) {
/* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
if (RES_IS_VSET(temp->cfg_entry)) {
if (fw_version <= PMCRAID_FW_VERSION_1)
target = temp->cfg_entry.unique_flags1;
else
target = temp->cfg_entry.array_id & 0xFF;
if (target > PMCRAID_MAX_VSET_TARGETS)
continue;
bus = PMCRAID_VSET_BUS_ID;
lun = 0;
} else if (RES_IS_GSCSI(temp->cfg_entry)) {
target = RES_TARGET(temp->cfg_entry.resource_address);
bus = PMCRAID_PHYS_BUS_ID;
lun = RES_LUN(temp->cfg_entry.resource_address);
} else {
continue;
}
if (bus == scsi_dev->channel &&
target == scsi_dev->id &&
lun == scsi_dev->lun) {
res = temp;
break;
}
}
if (res) {
res->scsi_dev = scsi_dev;
scsi_dev->hostdata = res;
res->change_detected = 0;
atomic_set(&res->read_failures, 0);
atomic_set(&res->write_failures, 0);
rc = 0;
}
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
return rc;
}
/**
* pmcraid_slave_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
*
* This function is executed by SCSI mid layer just after a device is first
* scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
* timeout value (default 30s) will be over-written to a higher value (60s)
* and max_sectors value will be over-written to 512. It also sets queue depth
* to host->cmd_per_lun value
*
* Return value:
* 0 on success
*/
static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;
if (!res)
return 0;
/* LLD exposes VSETs and Enclosure devices only */
if (RES_IS_GSCSI(res->cfg_entry) &&
scsi_dev->type != TYPE_ENCLOSURE)
return -ENXIO;
pmcraid_info("configuring %x:%x:%x:%x\n",
scsi_dev->host->unique_id,
scsi_dev->channel,
scsi_dev->id,
scsi_dev->lun);
if (RES_IS_GSCSI(res->cfg_entry)) {
scsi_dev->allow_restart = 1;
} else if (RES_IS_VSET(res->cfg_entry)) {
scsi_dev->allow_restart = 1;
blk_queue_rq_timeout(scsi_dev->request_queue,
PMCRAID_VSET_IO_TIMEOUT);
blk_queue_max_hw_sectors(scsi_dev->request_queue,
PMCRAID_VSET_MAX_SECTORS);
}
if (scsi_dev->tagged_supported &&
(RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
scsi_dev->host->cmd_per_lun);
} else {
scsi_adjust_queue_depth(scsi_dev, 0,
scsi_dev->host->cmd_per_lun);
}
return 0;
}
/**
* pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
*
* @scsi_dev: scsi device struct
*
* This is called by mid-layer before removing a device. Pointer assignments
* done in pmcraid_slave_alloc will be reset to NULL here.
*
* Return value
* none
*/
static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res;
res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
if (res)
res->scsi_dev = NULL;
scsi_dev->hostdata = NULL;
}
/**
* pmcraid_change_queue_depth - Change the device's queue depth
* @scsi_dev: scsi device struct
* @depth: depth to set
* @reason: calling context
*
* Return value
* actual depth set
*/
static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
int reason)
{
if (reason != SCSI_QDEPTH_DEFAULT)
return -EOPNOTSUPP;
if (depth > PMCRAID_MAX_CMD_PER_LUN)
depth = PMCRAID_MAX_CMD_PER_LUN;
scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
return scsi_dev->queue_depth;
}
/**
* pmcraid_change_queue_type - Change the device's queue type
* @scsi_dev: scsi device struct
* @tag: type of tags to use
*
* Return value:
* actual queue type set
*/
static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
{
struct pmcraid_resource_entry *res;
res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
if ((res) && scsi_dev->tagged_supported &&
(RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
scsi_set_tag_type(scsi_dev, tag);
if (tag)
scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
else
scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
} else
tag = 0;
return tag;
}
/**
* pmcraid_init_cmdblk - initializes a command block
*
* @cmd: pointer to struct pmcraid_cmd to be initialized
* @index: if >=0 first time initialization; otherwise reinitialization
*
* Return Value
* None
*/
void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
{
struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
if (index >= 0) {
/* first time initialization (called from probe) */
u32 ioasa_offset =
offsetof(struct pmcraid_control_block, ioasa);
cmd->index = index;
ioarcb->response_handle = cpu_to_le32(index << 2);
ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
} else {
/* re-initialization of various lengths, called once command is
* processed by IOA
*/
memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->hrrq_id = 0;
ioarcb->request_flags0 = 0;
ioarcb->request_flags1 = 0;
ioarcb->cmd_timeout = 0;
ioarcb->ioarcb_bus_addr &= (~0x1FULL);
ioarcb->ioadl_bus_addr = 0;
ioarcb->ioadl_length = 0;
ioarcb->data_transfer_length = 0;
ioarcb->add_cmd_param_length = 0;
ioarcb->add_cmd_param_offset = 0;
cmd->ioa_cb->ioasa.ioasc = 0;
cmd->ioa_cb->ioasa.residual_data_length = 0;
cmd->time_left = 0;
}
cmd->cmd_done = NULL;
cmd->scsi_cmd = NULL;
cmd->release = 0;
cmd->completion_req = 0;
cmd->sense_buffer = 0;
cmd->sense_buffer_dma = 0;
cmd->dma_handle = 0;
init_timer(&cmd->timer);
}
/**
* pmcraid_reinit_cmdblk - reinitialize a command block
*
* @cmd: pointer to struct pmcraid_cmd to be reinitialized
*
* Return Value
* None
*/
static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
{
pmcraid_init_cmdblk(cmd, -1);
}
/**
* pmcraid_get_free_cmd - get a free cmd block from command block pool
* @pinstance: adapter instance structure
*
* Return Value:
* returns pointer to cmd block or NULL if no blocks are available
*/
static struct pmcraid_cmd *pmcraid_get_free_cmd(
struct pmcraid_instance *pinstance
)
{
struct pmcraid_cmd *cmd = NULL;
unsigned long lock_flags;
/* free cmd block list is protected by free_pool_lock */
spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
if (!list_empty(&pinstance->free_cmd_pool)) {
cmd = list_entry(pinstance->free_cmd_pool.next,
struct pmcraid_cmd, free_list);
list_del(&cmd->free_list);
}
spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
/* Initialize the command block before giving it the caller */
if (cmd != NULL)
pmcraid_reinit_cmdblk(cmd);
return cmd;
}
/**
* pmcraid_return_cmd - return a completed command block back into free pool
* @cmd: pointer to the command block
*
* Return Value:
* nothing
*/
void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
}
/**
* pmcraid_read_interrupts - reads IOA interrupts
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* interrupts read from IOA
*/
static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
{
return (pinstance->interrupt_mode) ?
ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
}
/**
* pmcraid_disable_interrupts - Masks and clears all specified interrupts
*
* @pinstance: pointer to per adapter instance structure
* @intrs: interrupts to disable
*
* Return Value
* None
*/
static void pmcraid_disable_interrupts(
struct pmcraid_instance *pinstance,
u32 intrs
)
{
u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
ioread32(pinstance->int_regs.global_interrupt_mask_reg);
if (!pinstance->interrupt_mode) {
iowrite32(intrs,
pinstance->int_regs.ioa_host_interrupt_mask_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
}
}
/**
* pmcraid_enable_interrupts - Enables specified interrupts
*
* @pinstance: pointer to per adapter instance structure
* @intr: interrupts to enable
*
* Return Value
* None
*/
static void pmcraid_enable_interrupts(
struct pmcraid_instance *pinstance,
u32 intrs
)
{
u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
if (!pinstance->interrupt_mode) {
iowrite32(~intrs,
pinstance->int_regs.ioa_host_interrupt_mask_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
}
pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
ioread32(pinstance->int_regs.global_interrupt_mask_reg),
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
}
/**
* pmcraid_clr_trans_op - clear trans to op interrupt
*
* @pinstance: pointer to per adapter instance structure
*
* Return Value
* None
*/
static void pmcraid_clr_trans_op(
struct pmcraid_instance *pinstance
)
{
unsigned long lock_flags;
if (!pinstance->interrupt_mode) {
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.ioa_host_interrupt_mask_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
}
if (pinstance->reset_cmd != NULL) {
del_timer(&pinstance->reset_cmd->timer);
spin_lock_irqsave(
pinstance->host->host_lock, lock_flags);
pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
spin_unlock_irqrestore(
pinstance->host->host_lock, lock_flags);
}
}
/**
* pmcraid_reset_type - Determine the required reset type
* @pinstance: pointer to adapter instance structure
*
* IOA requires hard reset if any of the following conditions is true.
* 1. If HRRQ valid interrupt is not masked
* 2. IOA reset alert doorbell is set
* 3. If there are any error interrupts
*/
static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
{
u32 mask;
u32 intrs;
u32 alerts;
mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
if ((mask & INTRS_HRRQ_VALID) == 0 ||
(alerts & DOORBELL_IOA_RESET_ALERT) ||
(intrs & PMCRAID_ERROR_INTERRUPTS)) {
pmcraid_info("IOA requires hard reset\n");
pinstance->ioa_hard_reset = 1;
}
/* If unit check is active, trigger the dump */
if (intrs & INTRS_IOA_UNIT_CHECK)
pinstance->ioa_unit_check = 1;
}
/**
* pmcraid_bist_done - completion function for PCI BIST
* @cmd: pointer to reset command
* Return Value
* none
*/
static void pmcraid_ioa_reset(struct pmcraid_cmd *);
static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
int rc;
u16 pci_reg;
rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
/* If PCI config space can't be accessed wait for another two secs */
if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
cmd->time_left > 0) {
pmcraid_info("BIST not complete, waiting another 2 secs\n");
cmd->timer.expires = jiffies + cmd->time_left;
cmd->time_left = 0;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.function =
(void (*)(unsigned long))pmcraid_bist_done;
add_timer(&cmd->timer);
} else {
cmd->time_left = 0;
pmcraid_info("BIST is complete, proceeding with reset\n");
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
}
/**
* pmcraid_start_bist - starts BIST
* @cmd: pointer to reset cmd
* Return Value
* none
*/
static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 doorbells, intrs;
/* proceed with bist and wait for 2 seconds */
iowrite32(DOORBELL_IOA_START_BIST,
pinstance->int_regs.host_ioa_interrupt_reg);
doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
pmcraid_info("doorbells after start bist: %x intrs: %x\n",
doorbells, intrs);
cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
add_timer(&cmd->timer);
}
/**
* pmcraid_reset_alert_done - completion routine for reset_alert
* @cmd: pointer to command block used in reset sequence
* Return value
* None
*/
static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 status = ioread32(pinstance->ioa_status);
unsigned long lock_flags;
/* if the critical operation in progress bit is set or the wait times
* out, invoke reset engine to proceed with hard reset. If there is
* some more time to wait, restart the timer
*/
if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
cmd->time_left <= 0) {
pmcraid_info("critical op is reset proceeding with reset\n");
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
pmcraid_info("critical op is not yet reset waiting again\n");
/* restart timer if some more time is available to wait */
cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.function =
(void (*)(unsigned long))pmcraid_reset_alert_done;
add_timer(&cmd->timer);
}
}
/**
* pmcraid_reset_alert - alerts IOA for a possible reset
* @cmd : command block to be used for reset sequence.
*
* Return Value
* returns 0 if pci config-space is accessible and RESET_DOORBELL is
* successfully written to IOA. Returns non-zero in case pci_config_space
* is not accessible
*/
static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 doorbells;
int rc;
u16 pci_reg;
/* If we are able to access IOA PCI config space, alert IOA that we are
* going to reset it soon. This enables IOA to preserv persistent error
* data if any. In case memory space is not accessible, proceed with
* BIST or slot_reset
*/
rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
/* wait for IOA permission i.e until CRITICAL_OPERATION bit is
* reset IOA doesn't generate any interrupts when CRITICAL
* OPERATION bit is reset. A timer is started to wait for this
* bit to be reset.
*/
cmd->time_left = PMCRAID_RESET_TIMEOUT;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.function =
(void (*)(unsigned long))pmcraid_reset_alert_done;
add_timer(&cmd->timer);
iowrite32(DOORBELL_IOA_RESET_ALERT,
pinstance->int_regs.host_ioa_interrupt_reg);
doorbells =
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
pmcraid_info("doorbells after reset alert: %x\n", doorbells);
} else {
pmcraid_info("PCI config is not accessible starting BIST\n");
pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
pmcraid_start_bist(cmd);
}
}
/**
* pmcraid_timeout_handler - Timeout handler for internally generated ops
*
* @cmd : pointer to command structure, that got timedout
*
* This function blocks host requests and initiates an adapter reset.
*
* Return value:
* None
*/
static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
dev_info(&pinstance->pdev->dev,
"Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
cmd->ioa_cb->ioarcb.cdb[0]);
/* Command timeouts result in hard reset sequence. The command that got
* timed out may be the one used as part of reset sequence. In this
* case restart reset sequence using the same command block even if
* reset is in progress. Otherwise fail this command and get a free
* command block to restart the reset sequence.
*/
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (!pinstance->ioa_reset_in_progress) {
pinstance->ioa_reset_attempts = 0;
cmd = pmcraid_get_free_cmd(pinstance);
/* If we are out of command blocks, just return here itself.
* Some other command's timeout handler can do the reset job
*/
if (cmd == NULL) {
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
pmcraid_err("no free cmnd block for timeout handler\n");
return;
}
pinstance->reset_cmd = cmd;
pinstance->ioa_reset_in_progress = 1;
} else {
pmcraid_info("reset is already in progress\n");
if (pinstance->reset_cmd != cmd) {
/* This command should have been given to IOA, this
* command will be completed by fail_outstanding_cmds
* anyway
*/
pmcraid_err("cmd is pending but reset in progress\n");
}
/* If this command was being used as part of the reset
* sequence, set cmd_done pointer to pmcraid_ioa_reset. This
* causes fail_outstanding_commands not to return the command
* block back to free pool
*/
if (cmd == pinstance->reset_cmd)
cmd->cmd_done = pmcraid_ioa_reset;
}
/* Notify apps of important IOA bringup/bringdown sequences */
if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_START);
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
scsi_block_requests(pinstance->host);
pmcraid_reset_alert(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
/**
* pmcraid_internal_done - completion routine for internally generated cmds
*
* @cmd: command that got response from IOA
*
* Return Value:
* none
*/
static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
{
pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
/* Some of the internal commands are sent with callers blocking for the
* response. Same will be indicated as part of cmd->completion_req
* field. Response path needs to wake up any waiters waiting for cmd
* completion if this flag is set.
*/
if (cmd->completion_req) {
cmd->completion_req = 0;
complete(&cmd->wait_for_completion);
}
/* most of the internal commands are completed by caller itself, so
* no need to return the command block back to free pool until we are
* required to do so (e.g once done with initialization).
*/
if (cmd->release) {
cmd->release = 0;
pmcraid_return_cmd(cmd);
}
}
/**
* pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
*
* @cmd: command that got response from IOA
*
* This routine is called after driver re-reads configuration table due to a
* lost CCN. It returns the command block back to free pool and schedules
* worker thread to add/delete devices into the system.
*
* Return Value:
* none
*/
static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
{
pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
if (cmd->release) {
cmd->release = 0;
pmcraid_return_cmd(cmd);
}
pmcraid_info("scheduling worker for config table reinitialization\n");
schedule_work(&cmd->drv_inst->worker_q);
}
/**
* pmcraid_erp_done - Process completion of SCSI error response from device
* @cmd: pmcraid_command
*
* This function copies the sense buffer into the scsi_cmd struct and completes
* scsi_cmd by calling scsi_done function.
*
* Return value:
* none
*/
static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
scsi_cmd->result |= (DID_ERROR << 16);
scmd_printk(KERN_INFO, scsi_cmd,
"command CDB[0] = %x failed with IOASC: 0x%08X\n",
cmd->ioa_cb->ioarcb.cdb[0], ioasc);
}
/* if we had allocated sense buffers for request sense, copy the sense
* release the buffers
*/
if (cmd->sense_buffer != NULL) {
memcpy(scsi_cmd->sense_buffer,
cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
pci_free_consistent(pinstance->pdev,
SCSI_SENSE_BUFFERSIZE,
cmd->sense_buffer, cmd->sense_buffer_dma);
cmd->sense_buffer = NULL;
cmd->sense_buffer_dma = 0;
}
scsi_dma_unmap(scsi_cmd);
pmcraid_return_cmd(cmd);
scsi_cmd->scsi_done(scsi_cmd);
}
/**
* pmcraid_fire_command - sends an IOA command to adapter
*
* This function adds the given block into pending command list
* and returns without waiting
*
* @cmd : command to be sent to the device
*
* Return Value
* None
*/
static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
/* Add this command block to pending cmd pool. We do this prior to
* writting IOARCB to ioarrin because IOA might complete the command
* by the time we are about to add it to the list. Response handler
* (isr/tasklet) looks for cmd block in the pending pending list.
*/
spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
atomic_inc(&pinstance->outstanding_cmds);
/* driver writes lower 32-bit value of IOARCB address only */
mb();
iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
pinstance->ioarrin);
}
/**
* pmcraid_send_cmd - fires a command to IOA
*
* This function also sets up timeout function, and command completion
* function
*
* @cmd: pointer to the command block to be fired to IOA
* @cmd_done: command completion function, called once IOA responds
* @timeout: timeout to wait for this command completion
* @timeout_func: timeout handler
*
* Return value
* none
*/
static void pmcraid_send_cmd(
struct pmcraid_cmd *cmd,
void (*cmd_done) (struct pmcraid_cmd *),
unsigned long timeout,
void (*timeout_func) (struct pmcraid_cmd *)
)
{
/* initialize done function */
cmd->cmd_done = cmd_done;
if (timeout_func) {
/* setup timeout handler */
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + timeout;
cmd->timer.function = (void (*)(unsigned long))timeout_func;
add_timer(&cmd->timer);
}
/* fire the command to IOA */
_pmcraid_fire_command(cmd);
}
/**
* pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
* @cmd: pointer to the command block used for sending IOA shutdown command
*
* Return value
* None
*/
static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
/**
* pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
*
* @cmd: pointer to the command block used as part of reset sequence
*
* Return Value
* None
*/
static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
{
pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
/* Note that commands sent during reset require next command to be sent
* to IOA. Hence reinit the done function as well as timeout function
*/
pmcraid_reinit_cmdblk(cmd);
cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
cmd->ioa_cb->ioarcb.resource_handle =
cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
/* fire shutdown command to hardware. */
pmcraid_info("firing normal shutdown command (%d) to IOA\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
PMCRAID_SHUTDOWN_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_get_fwversion_done - completion function for get_fwversion
*
* @cmd: pointer to command block used to send INQUIRY command
*
* Return Value
* none
*/
static void pmcraid_querycfg(struct pmcraid_cmd *);
static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
unsigned long lock_flags;
/* configuration table entry size depends on firmware version. If fw
* version is not known, it is not possible to interpret IOA config
* table
*/
if (ioasc) {
pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
pmcraid_querycfg(cmd);
}
}
/**
* pmcraid_get_fwversion - reads firmware version information
*
* @cmd: pointer to command block used to send INQUIRY command
*
* Return Value
* none
*/
static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct pmcraid_instance *pinstance = cmd->drv_inst;
u16 data_size = sizeof(struct pmcraid_inquiry_data);
pmcraid_reinit_cmdblk(cmd);
ioarcb->request_type = REQ_TYPE_SCSI;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = INQUIRY;
ioarcb->cdb[1] = 1;
ioarcb->cdb[2] = 0xD0;
ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
ioarcb->cdb[4] = data_size & 0xFF;
/* Since entire inquiry data it can be part of IOARCB itself
*/
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length = cpu_to_le32(data_size);
ioadl = &(ioarcb->add_data.u.ioadl[0]);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
ioadl->data_len = cpu_to_le32(data_size);
pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
}
/**
* pmcraid_identify_hrrq - registers host rrq buffers with IOA
* @cmd: pointer to command block to be used for identify hrrq
*
* Return Value
* none
*/
static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
int index = cmd->hrrq_index;
__be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
void (*done_function)(struct pmcraid_cmd *);
pmcraid_reinit_cmdblk(cmd);
cmd->hrrq_index = index + 1;
if (cmd->hrrq_index < pinstance->num_hrrq) {
done_function = pmcraid_identify_hrrq;
} else {
cmd->hrrq_index = 0;
done_function = pmcraid_get_fwversion;
}
/* Initialize ioarcb */
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
/* initialize the hrrq number where IOA will respond to this command */
ioarcb->hrrq_id = index;
ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
ioarcb->cdb[1] = index;
/* IOA expects 64-bit pci address to be written in B.E format
* (i.e cdb[2]=MSByte..cdb[9]=LSB.
*/
pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
hrrq_addr, ioarcb->ioarcb_bus_addr, index);
memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
/* Subsequent commands require HRRQ identification to be successful.
* Note that this gets called even during reset from SCSI mid-layer
* or tasklet
*/
pmcraid_send_cmd(cmd, done_function,
PMCRAID_INTERNAL_TIMEOUT,
pmcraid_timeout_handler);
}
static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
/**
* pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
*
* @cmd: initialized command block pointer
*
* Return Value
* none
*/
static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
{
if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
else
atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
}
/**
* pmcraid_init_hcam - send an initialized command block(HCAM) to IOA
*
* @pinstance: pointer to adapter instance structure
* @type: HCAM type
*
* Return Value
* pointer to initialized pmcraid_cmd structure or NULL
*/
static struct pmcraid_cmd *pmcraid_init_hcam
(
struct pmcraid_instance *pinstance,
u8 type
)
{
struct pmcraid_cmd *cmd;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_ioadl_desc *ioadl;
struct pmcraid_hostrcb *hcam;
void (*cmd_done) (struct pmcraid_cmd *);
dma_addr_t dma;
int rcb_size;
cmd = pmcraid_get_free_cmd(pinstance);
if (!cmd) {
pmcraid_err("no free command blocks for hcam\n");
return cmd;
}
if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
cmd_done = pmcraid_process_ccn;
dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
hcam = &pinstance->ccn;
} else {
rcb_size = sizeof(struct pmcraid_hcam_ldn);
cmd_done = pmcraid_process_ldn;
dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
hcam = &pinstance->ldn;
}
/* initialize command pointer used for HCAM registration */
hcam->cmd = cmd;
ioarcb = &cmd->ioa_cb->ioarcb;
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioadl = ioarcb->add_data.u.ioadl;
/* Initialize ioarcb */
ioarcb->request_type = REQ_TYPE_HCAM;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
ioarcb->cdb[1] = type;
ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
ioarcb->cdb[8] = (rcb_size) & 0xFF;
ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
ioadl[0].data_len = cpu_to_le32(rcb_size);
ioadl[0].address = cpu_to_le32(dma);
cmd->cmd_done = cmd_done;
return cmd;
}
/**
* pmcraid_send_hcam - Send an HCAM to IOA
* @pinstance: ioa config struct
* @type: HCAM type
*
* This function will send a Host Controlled Async command to IOA.
*
* Return value:
* none
*/
static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
{
struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
pmcraid_send_hcam_cmd(cmd);
}
/**
* pmcraid_prepare_cancel_cmd - prepares a command block to abort another
*
* @cmd: pointer to cmd that is used as cancelling command
* @cmd_to_cancel: pointer to the command that needs to be cancelled
*/
static void pmcraid_prepare_cancel_cmd(
struct pmcraid_cmd *cmd,
struct pmcraid_cmd *cmd_to_cancel
)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
__be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
/* Get the resource handle to where the command to be aborted has been
* sent.
*/
ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
ioarcb->request_type = REQ_TYPE_IOACMD;
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
/* IOARCB address of the command to be cancelled is given in
* cdb[2]..cdb[9] is Big-Endian format. Note that length bits in
* IOARCB address are not masked.
*/
ioarcb_addr = cpu_to_be64(ioarcb_addr);
memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
}
/**
* pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
*
* @cmd: command to be used as cancelling command
* @type: HCAM type
* @cmd_done: op done function for the cancelling command
*/
static void pmcraid_cancel_hcam(
struct pmcraid_cmd *cmd,
u8 type,
void (*cmd_done) (struct pmcraid_cmd *)
)
{
struct pmcraid_instance *pinstance;
struct pmcraid_hostrcb *hcam;
pinstance = cmd->drv_inst;
hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
&pinstance->ldn : &pinstance->ccn;
/* prepare for cancelling previous hcam command. If the HCAM is
* currently not pending with IOA, we would have hcam->cmd as non-null
*/
if (hcam->cmd == NULL)
return;
pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
/* writing to IOARRIN must be protected by host_lock, as mid-layer
* schedule queuecommand while we are doing this
*/
pmcraid_send_cmd(cmd, cmd_done,
PMCRAID_INTERNAL_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
*
* @cmd: command block to be used for cancelling the HCAM
*/
static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
{
pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
pmcraid_reinit_cmdblk(cmd);
pmcraid_cancel_hcam(cmd,
PMCRAID_HCAM_CODE_CONFIG_CHANGE,
pmcraid_ioa_shutdown);
}
/**
* pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
*
* @cmd: command block to be used for cancelling the HCAM
*/
static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
{
pmcraid_cancel_hcam(cmd,
PMCRAID_HCAM_CODE_LOG_DATA,
pmcraid_cancel_ccn);
}
/**
* pmcraid_expose_resource - check if the resource can be exposed to OS
*
* @fw_version: firmware version code
* @cfgte: pointer to configuration table entry of the resource
*
* Return value:
* true if resource can be added to midlayer, false(0) otherwise
*/
static int pmcraid_expose_resource(u16 fw_version,
struct pmcraid_config_table_entry *cfgte)
{
int retval = 0;
if (cfgte->resource_type == RES_TYPE_VSET) {
if (fw_version <= PMCRAID_FW_VERSION_1)
retval = ((cfgte->unique_flags1 & 0x80) == 0);
else
retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
(cfgte->unique_flags1 & 0x80) == 0);
} else if (cfgte->resource_type == RES_TYPE_GSCSI)
retval = (RES_BUS(cfgte->resource_address) !=
PMCRAID_VIRTUAL_ENCL_BUS_ID);
return retval;
}
/* attributes supported by pmcraid_event_family */
enum {
PMCRAID_AEN_ATTR_UNSPEC,
PMCRAID_AEN_ATTR_EVENT,
__PMCRAID_AEN_ATTR_MAX,
};
#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
/* commands supported by pmcraid_event_family */
enum {
PMCRAID_AEN_CMD_UNSPEC,
PMCRAID_AEN_CMD_EVENT,
__PMCRAID_AEN_CMD_MAX,
};
#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
static struct genl_family pmcraid_event_family = {
.id = GENL_ID_GENERATE,
.name = "pmcraid",
.version = 1,
.maxattr = PMCRAID_AEN_ATTR_MAX
};
/**
* pmcraid_netlink_init - registers pmcraid_event_family
*
* Return value:
* 0 if the pmcraid_event_family is successfully registered
* with netlink generic, non-zero otherwise
*/
static int pmcraid_netlink_init(void)
{
int result;
result = genl_register_family(&pmcraid_event_family);
if (result)
return result;
pmcraid_info("registered NETLINK GENERIC group: %d\n",
pmcraid_event_family.id);
return result;
}
/**
* pmcraid_netlink_release - unregisters pmcraid_event_family
*
* Return value:
* none
*/
static void pmcraid_netlink_release(void)
{
genl_unregister_family(&pmcraid_event_family);
}
/**
* pmcraid_notify_aen - sends event msg to user space application
* @pinstance: pointer to adapter instance structure
* @type: HCAM type
*
* Return value:
* 0 if success, error value in case of any failure.
*/
static int pmcraid_notify_aen(
struct pmcraid_instance *pinstance,
struct pmcraid_aen_msg *aen_msg,
u32 data_size
)
{
struct sk_buff *skb;
void *msg_header;
u32 total_size, nla_genl_hdr_total_size;
int result;
aen_msg->hostno = (pinstance->host->unique_id << 16 |
MINOR(pinstance->cdev.dev));
aen_msg->length = data_size;
data_size += sizeof(*aen_msg);
total_size = nla_total_size(data_size);
/* Add GENL_HDR to total_size */
nla_genl_hdr_total_size =
(total_size + (GENL_HDRLEN +
((struct genl_family *)&pmcraid_event_family)->hdrsize)
+ NLMSG_HDRLEN);
skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
if (!skb) {
pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
total_size);
return -ENOMEM;
}
/* add the genetlink message header */
msg_header = genlmsg_put(skb, 0, 0,
&pmcraid_event_family, 0,
PMCRAID_AEN_CMD_EVENT);
if (!msg_header) {
pmcraid_err("failed to copy command details\n");
nlmsg_free(skb);
return -ENOMEM;
}
result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
if (result) {
pmcraid_err("failed to copy AEN attribute data\n");
nlmsg_free(skb);
return -EINVAL;
}
/* send genetlink multicast message to notify appplications */
result = genlmsg_end(skb, msg_header);
if (result < 0) {
pmcraid_err("genlmsg_end failed\n");
nlmsg_free(skb);
return result;
}
result =
genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
/* If there are no listeners, genlmsg_multicast may return non-zero
* value.
*/
if (result)
pmcraid_info("error (%x) sending aen event message\n", result);
return result;
}
/**
* pmcraid_notify_ccn - notifies about CCN event msg to user space
* @pinstance: pointer adapter instance structure
*
* Return value:
* 0 if success, error value in case of any failure
*/
static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
{
return pmcraid_notify_aen(pinstance,
pinstance->ccn.msg,
pinstance->ccn.hcam->data_len +
sizeof(struct pmcraid_hcam_hdr));
}
/**
* pmcraid_notify_ldn - notifies about CCN event msg to user space
* @pinstance: pointer adapter instance structure
*
* Return value:
* 0 if success, error value in case of any failure
*/
static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
{
return pmcraid_notify_aen(pinstance,
pinstance->ldn.msg,
pinstance->ldn.hcam->data_len +
sizeof(struct pmcraid_hcam_hdr));
}
/**
* pmcraid_notify_ioastate - sends IOA state event msg to user space
* @pinstance: pointer adapter instance structure
* @evt: controller state event to be sent
*
* Return value:
* 0 if success, error value in case of any failure
*/
static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
{
pinstance->scn.ioa_state = evt;
pmcraid_notify_aen(pinstance,
&pinstance->scn.msg,
sizeof(u32));
}
/**
* pmcraid_handle_config_change - Handle a config change from the adapter
* @pinstance: pointer to per adapter instance structure
*
* Return value:
* none
*/
static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
{
struct pmcraid_config_table_entry *cfg_entry;
struct pmcraid_hcam_ccn *ccn_hcam;
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cfgcmd;
struct pmcraid_resource_entry *res = NULL;
unsigned long lock_flags;
unsigned long host_lock_flags;
u32 new_entry = 1;
u32 hidden_entry = 0;
u16 fw_version;
int rc;
ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
cfg_entry = &ccn_hcam->cfg_entry;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
res: %x:%x:%x:%x\n",
pinstance->ccn.hcam->ilid,
pinstance->ccn.hcam->op_code,
((pinstance->ccn.hcam->timestamp1) |
((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
pinstance->ccn.hcam->notification_type,
pinstance->ccn.hcam->notification_lost,
pinstance->ccn.hcam->flags,
pinstance->host->unique_id,
RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
(RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
RES_BUS(cfg_entry->resource_address)),
RES_IS_VSET(*cfg_entry) ?
(fw_version <= PMCRAID_FW_VERSION_1 ?
cfg_entry->unique_flags1 :
cfg_entry->array_id & 0xFF) :
RES_TARGET(cfg_entry->resource_address),
RES_LUN(cfg_entry->resource_address));
/* If this HCAM indicates a lost notification, read the config table */
if (pinstance->ccn.hcam->notification_lost) {
cfgcmd = pmcraid_get_free_cmd(pinstance);
if (cfgcmd) {
pmcraid_info("lost CCN, reading config table\b");
pinstance->reinit_cfg_table = 1;
pmcraid_querycfg(cfgcmd);
} else {
pmcraid_err("lost CCN, no free cmd for querycfg\n");
}
goto out_notify_apps;
}
/* If this resource is not going to be added to mid-layer, just notify
* applications and return. If this notification is about hiding a VSET
* resource, check if it was exposed already.
*/
if (pinstance->ccn.hcam->notification_type ==
NOTIFICATION_TYPE_ENTRY_CHANGED &&
cfg_entry->resource_type == RES_TYPE_VSET) {
if (fw_version <= PMCRAID_FW_VERSION_1)
hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
else
hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
} else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
goto out_notify_apps;
}
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry(res, &pinstance->used_res_q, queue) {
rc = memcmp(&res->cfg_entry.resource_address,
&cfg_entry->resource_address,
sizeof(cfg_entry->resource_address));
if (!rc) {
new_entry = 0;
break;
}
}
if (new_entry) {
if (hidden_entry) {
spin_unlock_irqrestore(&pinstance->resource_lock,
lock_flags);
goto out_notify_apps;
}
/* If there are more number of resources than what driver can
* manage, do not notify the applications about the CCN. Just
* ignore this notifications and re-register the same HCAM
*/
if (list_empty(&pinstance->free_res_q)) {
spin_unlock_irqrestore(&pinstance->resource_lock,
lock_flags);
pmcraid_err("too many resources attached\n");
spin_lock_irqsave(pinstance->host->host_lock,
host_lock_flags);
pmcraid_send_hcam(pinstance,
PMCRAID_HCAM_CODE_CONFIG_CHANGE);
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
return;
}
res = list_entry(pinstance->free_res_q.next,
struct pmcraid_resource_entry, queue);
list_del(&res->queue);
res->scsi_dev = NULL;
res->reset_progress = 0;
list_add_tail(&res->queue, &pinstance->used_res_q);
}
memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
if (pinstance->ccn.hcam->notification_type ==
NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
if (res->scsi_dev) {
if (fw_version <= PMCRAID_FW_VERSION_1)
res->cfg_entry.unique_flags1 &= 0x7F;
else
res->cfg_entry.array_id &= 0xFF;
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
schedule_work(&pinstance->worker_q);
} else {
/* This may be one of the non-exposed resources */
list_move_tail(&res->queue, &pinstance->free_res_q);
}
} else if (!res->scsi_dev) {
res->change_detected = RES_CHANGE_ADD;
schedule_work(&pinstance->worker_q);
}
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
out_notify_apps:
/* Notify configuration changes to registered applications.*/
if (!pmcraid_disable_aen)
pmcraid_notify_ccn(pinstance);
cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
if (cmd)
pmcraid_send_hcam_cmd(cmd);
}
/**
* pmcraid_get_error_info - return error string for an ioasc
* @ioasc: ioasc code
* Return Value
* none
*/
static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
{
int i;
for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
return &pmcraid_ioasc_error_table[i];
}
return NULL;
}
/**
* pmcraid_ioasc_logger - log IOASC information based user-settings
* @ioasc: ioasc code
* @cmd: pointer to command that resulted in 'ioasc'
*/
void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
{
struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
if (error_info == NULL ||
cmd->drv_inst->current_log_level < error_info->log_level)
return;
/* log the error string */
pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
cmd->ioa_cb->ioarcb.cdb[0],
cmd->ioa_cb->ioarcb.resource_handle,
le32_to_cpu(ioasc), error_info->error_string);
}
/**
* pmcraid_handle_error_log - Handle a config change (error log) from the IOA
*
* @pinstance: pointer to per adapter instance structure
*
* Return value:
* none
*/
static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
{
struct pmcraid_hcam_ldn *hcam_ldn;
u32 ioasc;
hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
pmcraid_info
("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
pinstance->ldn.hcam->ilid,
pinstance->ldn.hcam->op_code,
pinstance->ldn.hcam->notification_type,
pinstance->ldn.hcam->notification_lost,
pinstance->ldn.hcam->flags,
pinstance->ldn.hcam->overlay_id);
/* log only the errors, no need to log informational log entries */
if (pinstance->ldn.hcam->notification_type !=
NOTIFICATION_TYPE_ERROR_LOG)
return;
if (pinstance->ldn.hcam->notification_lost ==
HOSTRCB_NOTIFICATIONS_LOST)
dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
dev_info(&pinstance->pdev->dev,
"UnitAttention due to IOA Bus Reset\n");
scsi_report_bus_reset(
pinstance->host,
RES_BUS(hcam_ldn->error_log.fd_ra));
}
return;
}
/**
* pmcraid_process_ccn - Op done function for a CCN.
* @cmd: pointer to command struct
*
* This function is the op done function for a configuration
* change notification
*
* Return value:
* none
*/
static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
unsigned long lock_flags;
pinstance->ccn.cmd = NULL;
pmcraid_return_cmd(cmd);
/* If driver initiated IOA reset happened while this hcam was pending
* with IOA, or IOA bringdown sequence is in progress, no need to
* re-register the hcam
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
atomic_read(&pinstance->ccn.ignore) == 1) {
return;
} else if (ioasc) {
dev_info(&pinstance->pdev->dev,
"Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
pmcraid_handle_config_change(pinstance);
}
}
/**
* pmcraid_process_ldn - op done function for an LDN
* @cmd: pointer to command block
*
* Return value
* none
*/
static void pmcraid_initiate_reset(struct pmcraid_instance *);
static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_hcam_ldn *ldn_hcam =
(struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
unsigned long lock_flags;
/* return the command block back to freepool */
pinstance->ldn.cmd = NULL;
pmcraid_return_cmd(cmd);
/* If driver initiated IOA reset happened while this hcam was pending
* with IOA, no need to re-register the hcam as reset engine will do it
* once reset sequence is complete
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
atomic_read(&pinstance->ccn.ignore) == 1) {
return;
} else if (!ioasc) {
pmcraid_handle_error_log(pinstance);
if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
spin_lock_irqsave(pinstance->host->host_lock,
lock_flags);
pmcraid_initiate_reset(pinstance);
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
return;
}
if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
pinstance->timestamp_error = 1;
pmcraid_set_timestamp(cmd);
}
} else {
dev_info(&pinstance->pdev->dev,
"Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
}
/* send netlink message for HCAM notification if enabled */
if (!pmcraid_disable_aen)
pmcraid_notify_ldn(pinstance);
cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
if (cmd)
pmcraid_send_hcam_cmd(cmd);
}
/**
* pmcraid_register_hcams - register HCAMs for CCN and LDN
*
* @pinstance: pointer per adapter instance structure
*
* Return Value
* none
*/
static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
{
pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
}
/**
* pmcraid_unregister_hcams - cancel HCAMs registered already
* @cmd: pointer to command used as part of reset sequence
*/
static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
/* During IOA bringdown, HCAM gets fired and tasklet proceeds with
* handling hcam response though it is not necessary. In order to
* prevent this, set 'ignore', so that bring-down sequence doesn't
* re-send any more hcams
*/
atomic_set(&pinstance->ccn.ignore, 1);
atomic_set(&pinstance->ldn.ignore, 1);
/* If adapter reset was forced as part of runtime reset sequence,
* start the reset sequence. Reset will be triggered even in case
* IOA unit_check.
*/
if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
pinstance->ioa_unit_check) {
pinstance->force_ioa_reset = 0;
pinstance->ioa_unit_check = 0;
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
return;
}
/* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
* one after the other. So CCN cancellation will be triggered by
* pmcraid_cancel_ldn itself.
*/
pmcraid_cancel_ldn(cmd);
}
/**
* pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
* @pinstance: pointer to adapter instance structure
* Return Value
* 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
*/
static void pmcraid_reinit_buffers(struct pmcraid_instance *);
static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
{
u32 intrs;
pmcraid_reinit_buffers(pinstance);
intrs = pmcraid_read_interrupts(pinstance);
pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
if (!pinstance->interrupt_mode) {
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.
ioa_host_interrupt_mask_reg);
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
}
return 1;
} else {
return 0;
}
}
/**
* pmcraid_soft_reset - performs a soft reset and makes IOA become ready
* @cmd : pointer to reset command block
*
* Return Value
* none
*/
static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 int_reg;
u32 doorbell;
/* There will be an interrupt when Transition to Operational bit is
* set so tasklet would execute next reset task. The timeout handler
* would re-initiate a reset
*/
cmd->cmd_done = pmcraid_ioa_reset;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies +
msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
if (!timer_pending(&cmd->timer))
add_timer(&cmd->timer);
/* Enable destructive diagnostics on IOA if it is not yet in
* operational state
*/
doorbell = DOORBELL_RUNTIME_RESET |
DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
/* Since we do RESET_ALERT and Start BIST we have to again write
* MSIX Doorbell to indicate the interrupt mode
*/
if (pinstance->interrupt_mode) {
iowrite32(DOORBELL_INTR_MODE_MSIX,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
}
iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
pmcraid_info("Waiting for IOA to become operational %x:%x\n",
ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
int_reg);
}
/**
* pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
*
* @pinstance: pointer to adapter instance structure
*
* Return Value
* none
*/
static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
{
pmcraid_info("%s is not yet implemented\n", __func__);
}
/**
* pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
* @pinstance: pointer to adapter instance structure
*
* This function fails all outstanding ops. If they are submitted to IOA
* already, it sends cancel all messages if IOA is still accepting IOARCBs,
* otherwise just completes the commands and returns the cmd blocks to free
* pool.
*
* Return value:
* none
*/
static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
{
struct pmcraid_cmd *cmd, *temp;
unsigned long lock_flags;
/* pending command list is protected by pending_pool_lock. Its
* traversal must be done as within this lock
*/
spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
free_list) {
list_del(&cmd->free_list);
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
lock_flags);
cmd->ioa_cb->ioasa.ioasc =
cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
cmd->ioa_cb->ioasa.ilid =
cpu_to_be32(PMCRAID_DRIVER_ILID);
/* In case the command timer is still running */
del_timer(&cmd->timer);
/* If this is an IO command, complete it by invoking scsi_done
* function. If this is one of the internal commands other
* than pmcraid_ioa_reset and HCAM commands invoke cmd_done to
* complete it
*/
if (cmd->scsi_cmd) {
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
__le32 resp = cmd->ioa_cb->ioarcb.response_handle;
scsi_cmd->result |= DID_ERROR << 16;
scsi_dma_unmap(scsi_cmd);
pmcraid_return_cmd(cmd);
pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
le32_to_cpu(resp) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
scsi_cmd->result);
scsi_cmd->scsi_done(scsi_cmd);
} else if (cmd->cmd_done == pmcraid_internal_done ||
cmd->cmd_done == pmcraid_erp_done) {
cmd->cmd_done(cmd);
} else if (cmd->cmd_done != pmcraid_ioa_reset &&
cmd->cmd_done != pmcraid_ioa_shutdown_done) {
pmcraid_return_cmd(cmd);
}
atomic_dec(&pinstance->outstanding_cmds);
spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
}
spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
}
/**
* pmcraid_ioa_reset - Implementation of IOA reset logic
*
* @cmd: pointer to the cmd block to be used for entire reset process
*
* This function executes most of the steps required for IOA reset. This gets
* called by user threads (modprobe/insmod/rmmod) timer, tasklet and midlayer's
* 'eh_' thread. Access to variables used for controlling the reset sequence is
* synchronized using host lock. Various functions called during reset process
* would make use of a single command block, pointer to which is also stored in
* adapter instance structure.
*
* Return Value
* None
*/
static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u8 reset_complete = 0;
pinstance->ioa_reset_in_progress = 1;
if (pinstance->reset_cmd != cmd) {
pmcraid_err("reset is called with different command block\n");
pinstance->reset_cmd = cmd;
}
pmcraid_info("reset_engine: state = %d, command = %p\n",
pinstance->ioa_state, cmd);
switch (pinstance->ioa_state) {
case IOA_STATE_DEAD:
/* If IOA is offline, whatever may be the reset reason, just
* return. callers might be waiting on the reset wait_q, wake
* up them
*/
pmcraid_err("IOA is offline no reset is possible\n");
reset_complete = 1;
break;
case IOA_STATE_IN_BRINGDOWN:
/* we enter here, once ioa shutdown command is processed by IOA
* Alert IOA for a possible reset. If reset alert fails, IOA
* goes through hard-reset
*/
pmcraid_disable_interrupts(pinstance, ~0);
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
break;
case IOA_STATE_UNKNOWN:
/* We may be called during probe or resume. Some pre-processing
* is required for prior to reset
*/
scsi_block_requests(pinstance->host);
/* If asked to reset while IOA was processing responses or
* there are any error responses then IOA may require
* hard-reset.
*/
if (pinstance->ioa_hard_reset == 0) {
if (ioread32(pinstance->ioa_status) &
INTRS_TRANSITION_TO_OPERATIONAL) {
pmcraid_info("sticky bit set, bring-up\n");
pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
pmcraid_reinit_cmdblk(cmd);
pmcraid_identify_hrrq(cmd);
} else {
pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
pmcraid_soft_reset(cmd);
}
} else {
/* Alert IOA of a possible reset and wait for critical
* operation in progress bit to reset
*/
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
}
break;
case IOA_STATE_IN_RESET_ALERT:
/* If critical operation in progress bit is reset or wait gets
* timed out, reset proceeds with starting BIST on the IOA.
* pmcraid_ioa_hard_reset keeps a count of reset attempts. If
* they are 3 or more, reset engine marks IOA dead and returns
*/
pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
pmcraid_start_bist(cmd);
break;
case IOA_STATE_IN_HARD_RESET:
pinstance->ioa_reset_attempts++;
/* retry reset if we haven't reached maximum allowed limit */
if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
pinstance->ioa_reset_attempts = 0;
pmcraid_err("IOA didn't respond marking it as dead\n");
pinstance->ioa_state = IOA_STATE_DEAD;
if (pinstance->ioa_bringdown)
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
else
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_FAILED);
reset_complete = 1;
break;
}
/* Once either bist or pci reset is done, restore PCI config
* space. If this fails, proceed with hard reset again
*/
pci_restore_state(pinstance->pdev);
/* fail all pending commands */
pmcraid_fail_outstanding_cmds(pinstance);
/* check if unit check is active, if so extract dump */
if (pinstance->ioa_unit_check) {
pmcraid_info("unit check is active\n");
pinstance->ioa_unit_check = 0;
pmcraid_get_dump(pinstance);
pinstance->ioa_reset_attempts--;
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
break;
}
/* if the reset reason is to bring-down the ioa, we might be
* done with the reset restore pci_config_space and complete
* the reset
*/
if (pinstance->ioa_bringdown) {
pmcraid_info("bringing down the adapter\n");
pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
pinstance->ioa_bringdown = 0;
pinstance->ioa_state = IOA_STATE_UNKNOWN;
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
reset_complete = 1;
} else {
/* bring-up IOA, so proceed with soft reset
* Reinitialize hrrq_buffers and their indices also
* enable interrupts after a pci_restore_state
*/
if (pmcraid_reset_enable_ioa(pinstance)) {
pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
pmcraid_info("bringing up the adapter\n");
pmcraid_reinit_cmdblk(cmd);
pmcraid_identify_hrrq(cmd);
} else {
pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
pmcraid_soft_reset(cmd);
}
}
break;
case IOA_STATE_IN_SOFT_RESET:
/* TRANSITION TO OPERATIONAL is on so start initialization
* sequence
*/
pmcraid_info("In softreset proceeding with bring-up\n");
pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
/* Initialization commands start with HRRQ identification. From
* now on tasklet completes most of the commands as IOA is up
* and intrs are enabled
*/
pmcraid_identify_hrrq(cmd);
break;
case IOA_STATE_IN_BRINGUP:
/* we are done with bringing up of IOA, change the ioa_state to
* operational and wake up any waiters
*/
pinstance->ioa_state = IOA_STATE_OPERATIONAL;
reset_complete = 1;
break;
case IOA_STATE_OPERATIONAL:
default:
/* When IOA is operational and a reset is requested, check for
* the reset reason. If reset is to bring down IOA, unregister
* HCAMs and initiate shutdown; if adapter reset is forced then
* restart reset sequence again
*/
if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
pinstance->force_ioa_reset == 0) {
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_SUCCESS);
reset_complete = 1;
} else {
if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
pmcraid_reinit_cmdblk(cmd);
pmcraid_unregister_hcams(cmd);
}
break;
}
/* reset will be completed if ioa_state is either DEAD or UNKNOWN or
* OPERATIONAL. Reset all control variables used during reset, wake up
* any waiting threads and let the SCSI mid-layer send commands. Note
* that host_lock must be held before invoking scsi_report_bus_reset.
*/
if (reset_complete) {
pinstance->ioa_reset_in_progress = 0;
pinstance->ioa_reset_attempts = 0;
pinstance->reset_cmd = NULL;
pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
pinstance->ioa_bringdown = 0;
pmcraid_return_cmd(cmd);
/* If target state is to bring up the adapter, proceed with
* hcam registration and resource exposure to mid-layer.
*/
if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
pmcraid_register_hcams(pinstance);
wake_up_all(&pinstance->reset_wait_q);
}
return;
}
/**
* pmcraid_initiate_reset - initiates reset sequence. This is called from
* ISR/tasklet during error interrupts including IOA unit check. If reset
* is already in progress, it just returns, otherwise initiates IOA reset
* to bring IOA up to operational state.
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* none
*/
static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
{
struct pmcraid_cmd *cmd;
/* If the reset is already in progress, just return, otherwise start
* reset sequence and return
*/
if (!pinstance->ioa_reset_in_progress) {
scsi_block_requests(pinstance->host);
cmd = pmcraid_get_free_cmd(pinstance);
if (cmd == NULL) {
pmcraid_err("no cmnd blocks for initiate_reset\n");
return;
}
pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
pinstance->reset_cmd = cmd;
pinstance->force_ioa_reset = 1;
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_START);
pmcraid_ioa_reset(cmd);
}
}
/**
* pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
* or bringdown IOA
* @pinstance: pointer adapter instance structure
* @shutdown_type: shutdown type to be used NONE, NORMAL or ABRREV
* @target_state: expected target state after reset
*
* Note: This command initiates reset and waits for its completion. Hence this
* should not be called from isr/timer/tasklet functions (timeout handlers,
* error response handlers and interrupt handlers).
*
* Return Value
* 1 in case ioa_state is not target_state, 0 otherwise.
*/
static int pmcraid_reset_reload(
struct pmcraid_instance *pinstance,
u8 shutdown_type,
u8 target_state
)
{
struct pmcraid_cmd *reset_cmd = NULL;
unsigned long lock_flags;
int reset = 1;
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_reset_in_progress) {
pmcraid_info("reset_reload: reset is already in progress\n");
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
wait_event(pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_state == IOA_STATE_DEAD) {
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
pmcraid_info("reset_reload: IOA is dead\n");
return reset;
} else if (pinstance->ioa_state == target_state) {
reset = 0;
}
}
if (reset) {
pmcraid_info("reset_reload: proceeding with reset\n");
scsi_block_requests(pinstance->host);
reset_cmd = pmcraid_get_free_cmd(pinstance);
if (reset_cmd == NULL) {
pmcraid_err("no free cmnd for reset_reload\n");
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
return reset;
}
if (shutdown_type == SHUTDOWN_NORMAL)
pinstance->ioa_bringdown = 1;
pinstance->ioa_shutdown_type = shutdown_type;
pinstance->reset_cmd = reset_cmd;
pinstance->force_ioa_reset = reset;
pmcraid_info("reset_reload: initiating reset\n");
pmcraid_ioa_reset(reset_cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
pmcraid_info("reset_reload: waiting for reset to complete\n");
wait_event(pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress);
pmcraid_info("reset_reload: reset is complete !!\n");
scsi_unblock_requests(pinstance->host);
if (pinstance->ioa_state == target_state)
reset = 0;
}
return reset;
}
/**
* pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
*
* @pinstance: pointer to adapter instance structure
*
* Return Value
* whatever is returned from pmcraid_reset_reload
*/
static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
{
return pmcraid_reset_reload(pinstance,
SHUTDOWN_NORMAL,
IOA_STATE_UNKNOWN);
}
/**
* pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
*
* @pinstance: pointer to adapter instance structure
*
* Return Value
* whatever is returned from pmcraid_reset_reload
*/
static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
{
pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
return pmcraid_reset_reload(pinstance,
SHUTDOWN_NONE,
IOA_STATE_OPERATIONAL);
}
/**
* pmcraid_request_sense - Send request sense to a device
* @cmd: pmcraid command struct
*
* This function sends a request sense to a device as a result of a check
* condition. This method re-uses the same command block that failed earlier.
*/
static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
/* allocate DMAable memory for sense buffers */
cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
SCSI_SENSE_BUFFERSIZE,
&cmd->sense_buffer_dma);
if (cmd->sense_buffer == NULL) {
pmcraid_err
("couldn't allocate sense buffer for request sense\n");
pmcraid_erp_done(cmd);
return;
}
/* re-use the command block */
memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->request_flags0 = (SYNC_COMPLETE |
NO_LINK_DESCS |
INHIBIT_UL_CHECK);
ioarcb->request_type = REQ_TYPE_SCSI;
ioarcb->cdb[0] = REQUEST_SENSE;
ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
/* request sense might be called as part of error response processing
* which runs in tasklets context. It is possible that mid-layer might
* schedule queuecommand during this time, hence, writting to IOARRIN
* must be protect by host_lock
*/
pmcraid_send_cmd(cmd, pmcraid_erp_done,
PMCRAID_REQUEST_SENSE_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
* @cmd: command that failed
* @sense: true if request_sense is required after cancel all
*
* This function sends a cancel all to a device to clear the queue.
*/
static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
: pmcraid_request_sense;
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->request_flags0 = SYNC_OVERRIDE;
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
if (RES_IS_GSCSI(res->cfg_entry))
ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
ioarcb->ioadl_bus_addr = 0;
ioarcb->ioadl_length = 0;
ioarcb->data_transfer_length = 0;
ioarcb->ioarcb_bus_addr &= (~0x1FULL);
/* writing to IOARRIN must be protected by host_lock, as mid-layer
* schedule queuecommand while we are doing this
*/
pmcraid_send_cmd(cmd, cmd_done,
PMCRAID_REQUEST_SENSE_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_frame_auto_sense: frame fixed format sense information
*
* @cmd: pointer to failing command block
*
* Return value
* none
*/
static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
{
u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
u32 ioasc = le32_to_cpu(ioasa->ioasc);
u32 failing_lba = 0;
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
if (RES_IS_VSET(res->cfg_entry) &&
ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
ioasa->u.vset.failing_lba_hi != 0) {
sense_buf[0] = 0x72;
sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
sense_buf[7] = 12;
sense_buf[8] = 0;
sense_buf[9] = 0x0A;
sense_buf[10] = 0x80;
failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
sense_buf[12] = (failing_lba & 0xff000000) >> 24;
sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
sense_buf[15] = failing_lba & 0x000000ff;
failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
sense_buf[16] = (failing_lba & 0xff000000) >> 24;
sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
sense_buf[19] = failing_lba & 0x000000ff;
} else {
sense_buf[0] = 0x70;
sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
if (RES_IS_VSET(res->cfg_entry))
failing_lba =
le32_to_cpu(ioasa->u.
vset.failing_lba_lo);
sense_buf[0] |= 0x80;
sense_buf[3] = (failing_lba >> 24) & 0xff;
sense_buf[4] = (failing_lba >> 16) & 0xff;
sense_buf[5] = (failing_lba >> 8) & 0xff;
sense_buf[6] = failing_lba & 0xff;
}
sense_buf[7] = 6; /* additional length */
}
}
/**
* pmcraid_error_handler - Error response handlers for a SCSI op
* @cmd: pointer to pmcraid_cmd that has failed
*
* This function determines whether or not to initiate ERP on the affected
* device. This is called from a tasklet, which doesn't hold any locks.
*
* Return value:
* 0 it caller can complete the request, otherwise 1 where in error
* handler itself completes the request and returns the command block
* back to free-pool
*/
static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
u32 ioasc = le32_to_cpu(ioasa->ioasc);
u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
u32 sense_copied = 0;
if (!res) {
pmcraid_info("resource pointer is NULL\n");
return 0;
}
/* If this was a SCSI read/write command keep count of errors */
if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
atomic_inc(&res->read_failures);
else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
atomic_inc(&res->write_failures);
if (!RES_IS_GSCSI(res->cfg_entry) &&
masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
pmcraid_frame_auto_sense(cmd);
}
/* Log IOASC/IOASA information based on user settings */
pmcraid_ioasc_logger(ioasc, cmd);
switch (masked_ioasc) {
case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
scsi_cmd->result |= (DID_ABORT << 16);
break;
case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
scsi_cmd->result |= (DID_NO_CONNECT << 16);
break;
case PMCRAID_IOASC_NR_SYNC_REQUIRED:
res->sync_reqd = 1;
scsi_cmd->result |= (DID_IMM_RETRY << 16);
break;
case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
case PMCRAID_IOASC_UA_BUS_WAS_RESET:
case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
if (!res->reset_progress)
scsi_report_bus_reset(pinstance->host,
scsi_cmd->device->channel);
scsi_cmd->result |= (DID_ERROR << 16);
break;
case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
res->sync_reqd = 1;
/* if check_condition is not active return with error otherwise
* get/frame the sense buffer
*/
if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
SAM_STAT_CHECK_CONDITION &&
PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
return 0;
/* If we have auto sense data as part of IOASA pass it to
* mid-layer
*/
if (ioasa->auto_sense_length != 0) {
short sense_len = ioasa->auto_sense_length;
int data_size = min_t(u16, le16_to_cpu(sense_len),
SCSI_SENSE_BUFFERSIZE);
memcpy(scsi_cmd->sense_buffer,
ioasa->sense_data,
data_size);
sense_copied = 1;
}
if (RES_IS_GSCSI(res->cfg_entry))
pmcraid_cancel_all(cmd, sense_copied);
else if (sense_copied)
pmcraid_erp_done(cmd);
else
pmcraid_request_sense(cmd);
return 1;
case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
break;
default:
if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
scsi_cmd->result |= (DID_ERROR << 16);
break;
}
return 0;
}
/**
* pmcraid_reset_device - device reset handler functions
*
* @scsi_cmd: scsi command struct
* @modifier: reset modifier indicating the reset sequence to be performed
*
* This function issues a device reset to the affected device.
* A LUN reset will be sent to the device first. If that does
* not work, a target reset will be sent.
*
* Return value:
* SUCCESS / FAILED
*/
static int pmcraid_reset_device(
struct scsi_cmnd *scsi_cmd,
unsigned long timeout,
u8 modifier
)
{
struct pmcraid_cmd *cmd;
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
struct pmcraid_ioarcb *ioarcb;
unsigned long lock_flags;
u32 ioasc;
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
res = scsi_cmd->device->hostdata;
if (!res) {
sdev_printk(KERN_ERR, scsi_cmd->device,
"reset_device: NULL resource pointer\n");
return FAILED;
}
/* If adapter is currently going through reset/reload, return failed.
* This will force the mid-layer to call _eh_bus/host reset, which
* will then go to sleep and wait for the reset to complete
*/
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_reset_in_progress ||
pinstance->ioa_state == IOA_STATE_DEAD) {
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
return FAILED;
}
res->reset_progress = 1;
pmcraid_info("Resetting %s resource with addr %x\n",
((modifier & RESET_DEVICE_LUN) ? "LUN" :
((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
le32_to_cpu(res->cfg_entry.resource_address));
/* get a free cmd block */
cmd = pmcraid_get_free_cmd(pinstance);
if (cmd == NULL) {
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
pmcraid_err("%s: no cmd blocks are available\n", __func__);
return FAILED;
}
ioarcb = &cmd->ioa_cb->ioarcb;
ioarcb->resource_handle = res->cfg_entry.resource_handle;
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
/* Initialize reset modifier bits */
if (modifier)
modifier = ENABLE_RESET_MODIFIER | modifier;
ioarcb->cdb[1] = modifier;
init_completion(&cmd->wait_for_completion);
cmd->completion_req = 1;
pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
pmcraid_send_cmd(cmd,
pmcraid_internal_done,
timeout,
pmcraid_timeout_handler);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
/* RESET_DEVICE command completes after all pending IOARCBs are
* completed. Once this command is completed, pmcraind_internal_done
* will wake up the 'completion' queue.
*/
wait_for_completion(&cmd->wait_for_completion);
/* complete the command here itself and return the command block
* to free list
*/
pmcraid_return_cmd(cmd);
res->reset_progress = 0;
ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
/* set the return value based on the returned ioasc */
return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
* _pmcraid_io_done - helper for pmcraid_io_done function
*
* @cmd: pointer to pmcraid command struct
* @reslen: residual data length to be set in the ioasa
* @ioasc: ioasc either returned by IOA or set by driver itself.
*
* This function is invoked by pmcraid_io_done to complete mid-layer
* scsi ops.
*
* Return value:
* 0 if caller is required to return it to free_pool. Returns 1 if
* caller need not worry about freeing command block as error handler
* will take care of that.
*/
static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
int rc = 0;
scsi_set_resid(scsi_cmd, reslen);
pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
ioasc, scsi_cmd->result);
if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
rc = pmcraid_error_handler(cmd);
if (rc == 0) {
scsi_dma_unmap(scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
}
return rc;
}
/**
* pmcraid_io_done - SCSI completion function
*
* @cmd: pointer to pmcraid command struct
*
* This function is invoked by tasklet/mid-layer error handler to completing
* the SCSI ops sent from mid-layer.
*
* Return value
* none
*/
static void pmcraid_io_done(struct pmcraid_cmd *cmd)
{
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
pmcraid_return_cmd(cmd);
}
/**
* pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
*
* @cmd: command block of the command to be aborted
*
* Return Value:
* returns pointer to command structure used as cancelling cmd
*/
static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_cmd *cancel_cmd;
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
pinstance = (struct pmcraid_instance *)cmd->drv_inst;
res = cmd->scsi_cmd->device->hostdata;
cancel_cmd = pmcraid_get_free_cmd(pinstance);
if (cancel_cmd == NULL) {
pmcraid_err("%s: no cmd blocks are available\n", __func__);
return NULL;
}
pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
cmd->ioa_cb->ioarcb.cdb[0],
cmd->ioa_cb->ioarcb.response_handle >> 2);
init_completion(&cancel_cmd->wait_for_completion);
cancel_cmd->completion_req = 1;
pmcraid_info("command (%d) CDB[0] = %x for %x\n",
le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
cancel_cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
pmcraid_send_cmd(cancel_cmd,
pmcraid_internal_done,
PMCRAID_INTERNAL_TIMEOUT,
pmcraid_timeout_handler);
return cancel_cmd;
}
/**
* pmcraid_abort_complete - Waits for ABORT TASK completion
*
* @cancel_cmd: command block use as cancelling command
*
* Return Value:
* returns SUCCESS if ABORT TASK has good completion
* otherwise FAILED
*/
static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
{
struct pmcraid_resource_entry *res;
u32 ioasc;
wait_for_completion(&cancel_cmd->wait_for_completion);
res = cancel_cmd->res;
cancel_cmd->res = NULL;
ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
/* If the abort task is not timed out we will get a Good completion
* as sense_key, otherwise we may get one the following responses
* due to subsequent bus reset or device reset. In case IOASC is
* NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource
*/
if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
res->sync_reqd = 1;
ioasc = 0;
}
/* complete the command here itself */
pmcraid_return_cmd(cancel_cmd);
return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
* pmcraid_eh_abort_handler - entry point for aborting a single task on errors
*
* @scsi_cmd: scsi command struct given by mid-layer. When this is called
* mid-layer ensures that no other commands are queued. This
* never gets called under interrupt, but a separate eh thread.
*
* Return value:
* SUCCESS / FAILED
*/
static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
{
struct pmcraid_instance *pinstance;
struct pmcraid_cmd *cmd;
struct pmcraid_resource_entry *res;
unsigned long host_lock_flags;
unsigned long pending_lock_flags;
struct pmcraid_cmd *cancel_cmd = NULL;
int cmd_found = 0;
int rc = FAILED;
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
scmd_printk(KERN_INFO, scsi_cmd,
"I/O command timed out, aborting it.\n");
res = scsi_cmd->device->hostdata;
if (res == NULL)
return rc;
/* If we are currently going through reset/reload, return failed.
* This will force the mid-layer to eventually call
* pmcraid_eh_host_reset which will then go to sleep and wait for the
* reset to complete
*/
spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
if (pinstance->ioa_reset_in_progress ||
pinstance->ioa_state == IOA_STATE_DEAD) {
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
return rc;
}
/* loop over pending cmd list to find cmd corresponding to this
* scsi_cmd. Note that this command might not have been completed
* already. locking: all pending commands are protected with
* pending_pool_lock.
*/
spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
if (cmd->scsi_cmd == scsi_cmd) {
cmd_found = 1;
break;
}
}
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
pending_lock_flags);
/* If the command to be aborted was given to IOA and still pending with
* it, send ABORT_TASK to abort this and wait for its completion
*/
if (cmd_found)
cancel_cmd = pmcraid_abort_cmd(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
if (cancel_cmd) {
cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
rc = pmcraid_abort_complete(cancel_cmd);
}
return cmd_found ? rc : SUCCESS;
}
/**
* pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
*
* @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
*
* All these routines invokve pmcraid_reset_device with appropriate parameters.
* Since these are called from mid-layer EH thread, no other IO will be queued
* to the resource being reset. However, control path (IOCTL) may be active so
* it is necessary to synchronize IOARRIN writes which pmcraid_reset_device
* takes care by locking/unlocking host_lock.
*
* Return value
* SUCCESS or FAILED
*/
static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"resetting device due to an I/O command timeout.\n");
return pmcraid_reset_device(scmd,
PMCRAID_INTERNAL_TIMEOUT,
RESET_DEVICE_LUN);
}
static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"Doing bus reset due to an I/O command timeout.\n");
return pmcraid_reset_device(scmd,
PMCRAID_RESET_BUS_TIMEOUT,
RESET_DEVICE_BUS);
}
static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"Doing target reset due to an I/O command timeout.\n");
return pmcraid_reset_device(scmd,
PMCRAID_INTERNAL_TIMEOUT,
RESET_DEVICE_TARGET);
}
/**
* pmcraid_eh_host_reset_handler - adapter reset handler callback
*
* @scmd: pointer to scsi_cmd that was sent to a resource of adapter
*
* Initiates adapter reset to bring it up to operational state
*
* Return value
* SUCCESS or FAILED
*/
static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
{
unsigned long interval = 10000; /* 10 seconds interval */
int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)(scmd->device->host->hostdata);
/* wait for an additional 150 seconds just in case firmware could come
* up and if it could complete all the pending commands excluding the
* two HCAM (CCN and LDN).
*/
while (waits--) {
if (atomic_read(&pinstance->outstanding_cmds) <=
PMCRAID_MAX_HCAM_CMD)
return SUCCESS;
msleep(interval);
}
dev_err(&pinstance->pdev->dev,
"Adapter being reset due to an I/O command timeout.\n");
return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
}
/**
* pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
* @scsi_cmd: scsi command struct
*
* Return value
* number of tags or 0 if the task is not tagged
*/
static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
{
char tag[2];
u8 rc = 0;
if (scsi_populate_tag_msg(scsi_cmd, tag)) {
switch (tag[0]) {
case MSG_SIMPLE_TAG:
rc = TASK_TAG_SIMPLE;
break;
case MSG_HEAD_TAG:
rc = TASK_TAG_QUEUE_HEAD;
break;
case MSG_ORDERED_TAG:
rc = TASK_TAG_ORDERED;
break;
};
}
return rc;
}
/**
* pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
* @cmd: pmcraid command struct
* @sgcount: count of scatter-gather elements
*
* Return value
* returns pointer pmcraid_ioadl_desc, initialized to point to internal
* or external IOADLs
*/
struct pmcraid_ioadl_desc *
pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
{
struct pmcraid_ioadl_desc *ioadl;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
int ioadl_count = 0;
if (ioarcb->add_cmd_param_length)
ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
ioarcb->ioadl_length =
sizeof(struct pmcraid_ioadl_desc) * sgcount;
if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
/* external ioadls start at offset 0x80 from control_block
* structure, re-using 24 out of 27 ioadls part of IOARCB.
* It is necessary to indicate to firmware that driver is
* using ioadls to be treated as external to IOARCB.
*/
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->ioadl_bus_addr =
cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[3]));
ioadl = &ioarcb->add_data.u.ioadl[3];
} else {
ioarcb->ioadl_bus_addr =
cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[ioadl_count]));
ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
ioarcb->ioarcb_bus_addr |=
DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
}
return ioadl;
}
/**
* pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
* @pinstance: pointer to adapter instance structure
* @cmd: pmcraid command struct
*
* This function is invoked by queuecommand entry point while sending a command
* to firmware. This builds ioadl descriptors and sets up ioarcb fields.
*
* Return value:
* 0 on success or -1 on failure
*/
static int pmcraid_build_ioadl(
struct pmcraid_instance *pinstance,
struct pmcraid_cmd *cmd
)
{
int i, nseg;
struct scatterlist *sglist;
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
u32 length = scsi_bufflen(scsi_cmd);
if (!length)
return 0;
nseg = scsi_dma_map(scsi_cmd);
if (nseg < 0) {
scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n");
return -1;
} else if (nseg > PMCRAID_MAX_IOADLS) {
scsi_dma_unmap(scsi_cmd);
scmd_printk(KERN_ERR, scsi_cmd,
"sg count is (%d) more than allowed!\n", nseg);
return -1;
}
/* Initialize IOARCB data transfer length fields */
if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length = cpu_to_le32(length);
ioadl = pmcraid_init_ioadls(cmd, nseg);
/* Initialize IOADL descriptor addresses */
scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
ioadl[i].flags = 0;
}
/* setup last descriptor */
ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
return 0;
}
/**
* pmcraid_free_sglist - Frees an allocated SG buffer list
* @sglist: scatter/gather list pointer
*
* Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
*
* Return value:
* none
*/
static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
{
int i;
for (i = 0; i < sglist->num_sg; i++)
__free_pages(sg_page(&(sglist->scatterlist[i])),
sglist->order);
kfree(sglist);
}
/**
* pmcraid_alloc_sglist - Allocates memory for a SG list
* @buflen: buffer length
*
* Allocates a DMA'able buffer in chunks and assembles a scatter/gather
* list.
*
* Return value
* pointer to sglist / NULL on failure
*/
static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
{
struct pmcraid_sglist *sglist;
struct scatterlist *scatterlist;
struct page *page;
int num_elem, i, j;
int sg_size;
int order;
int bsize_elem;
sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
order = (sg_size > 0) ? get_order(sg_size) : 0;
bsize_elem = PAGE_SIZE * (1 << order);
/* Determine the actual number of sg entries needed */
if (buflen % bsize_elem)
num_elem = (buflen / bsize_elem) + 1;
else
num_elem = buflen / bsize_elem;
/* Allocate a scatter/gather list for the DMA */
sglist = kzalloc(sizeof(struct pmcraid_sglist) +
(sizeof(struct scatterlist) * (num_elem - 1)),
GFP_KERNEL);
if (sglist == NULL)
return NULL;
scatterlist = sglist->scatterlist;
sg_init_table(scatterlist, num_elem);
sglist->order = order;
sglist->num_sg = num_elem;
sg_size = buflen;
for (i = 0; i < num_elem; i++) {
page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
if (!page) {
for (j = i - 1; j >= 0; j--)
__free_pages(sg_page(&scatterlist[j]), order);
kfree(sglist);
return NULL;
}
sg_set_page(&scatterlist[i], page,
sg_size < bsize_elem ? sg_size : bsize_elem, 0);
sg_size -= bsize_elem;
}
return sglist;
}
/**
* pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
* @sglist: scatter/gather list pointer
* @buffer: buffer pointer
* @len: buffer length
* @direction: data transfer direction
*
* Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
*
* Return value:
* 0 on success / other on failure
*/
static int pmcraid_copy_sglist(
struct pmcraid_sglist *sglist,
unsigned long buffer,
u32 len,
int direction
)
{
struct scatterlist *scatterlist;
void *kaddr;
int bsize_elem;
int i;
int rc = 0;
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
scatterlist = sglist->scatterlist;
for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
struct page *page = sg_page(&scatterlist[i]);
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
rc = __copy_from_user(kaddr,
(void *)buffer,
bsize_elem);
else
rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
kunmap(page);
if (rc) {
pmcraid_err("failed to copy user data into sg list\n");
return -EFAULT;
}
scatterlist[i].length = bsize_elem;
}
if (len % bsize_elem) {
struct page *page = sg_page(&scatterlist[i]);
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
rc = __copy_from_user(kaddr,
(void *)buffer,
len % bsize_elem);
else
rc = __copy_to_user((void *)buffer,
kaddr,
len % bsize_elem);
kunmap(page);
scatterlist[i].length = len % bsize_elem;
}
if (rc) {
pmcraid_err("failed to copy user data into sg list\n");
rc = -EFAULT;
}
return rc;
}
/**
* pmcraid_queuecommand - Queue a mid-layer request
* @scsi_cmd: scsi command struct
* @done: done function
*
* This function queues a request generated by the mid-layer. Midlayer calls
* this routine within host->lock. Some of the functions called by queuecommand
* would use cmd block queue locks (free_pool_lock and pending_pool_lock)
*
* Return value:
* 0 on success
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
*/
static int pmcraid_queuecommand_lck(
struct scsi_cmnd *scsi_cmd,
void (*done) (struct scsi_cmnd *)
)
{
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_cmd *cmd;
u32 fw_version;
int rc = 0;
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
scsi_cmd->scsi_done = done;
res = scsi_cmd->device->hostdata;
scsi_cmd->result = (DID_OK << 16);
/* if adapter is marked as dead, set result to DID_NO_CONNECT complete
* the command
*/
if (pinstance->ioa_state == IOA_STATE_DEAD) {
pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
return 0;
}
/* If IOA reset is in progress, can't queue the commands */
if (pinstance->ioa_reset_in_progress)
return SCSI_MLQUEUE_HOST_BUSY;
/* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete
* the command here itself with success return
*/
if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
scsi_cmd->scsi_done(scsi_cmd);
return 0;
}
/* initialize the command and IOARCB to be sent to IOA */
cmd = pmcraid_get_free_cmd(pinstance);
if (cmd == NULL) {
pmcraid_err("free command block is not available\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
cmd->scsi_cmd = scsi_cmd;
ioarcb = &(cmd->ioa_cb->ioarcb);
memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
ioarcb->resource_handle = res->cfg_entry.resource_handle;
ioarcb->request_type = REQ_TYPE_SCSI;
/* set hrrq number where the IOA should respond to. Note that all cmds
* generated internally uses hrrq_id 0, exception to this is the cmd
* block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
* hrrq_id assigned here in queuecommand
*/
ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
pinstance->num_hrrq;
cmd->cmd_done = pmcraid_io_done;
if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
if (scsi_cmd->underflow == 0)
ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
if (res->sync_reqd) {
ioarcb->request_flags0 |= SYNC_COMPLETE;
res->sync_reqd = 0;
}
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd);
if (RES_IS_GSCSI(res->cfg_entry))
ioarcb->request_flags1 |= DELAY_AFTER_RESET;
}
rc = pmcraid_build_ioadl(pinstance, cmd);
pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
le32_to_cpu(ioarcb->response_handle) >> 2,
scsi_cmd->cmnd[0], pinstance->host->unique_id,
RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
PMCRAID_PHYS_BUS_ID,
RES_IS_VSET(res->cfg_entry) ?
(fw_version <= PMCRAID_FW_VERSION_1 ?
res->cfg_entry.unique_flags1 :
res->cfg_entry.array_id & 0xFF) :
RES_TARGET(res->cfg_entry.resource_address),
RES_LUN(res->cfg_entry.resource_address));
if (likely(rc == 0)) {
_pmcraid_fire_command(cmd);
} else {
pmcraid_err("queuecommand could not build ioadl\n");
pmcraid_return_cmd(cmd);
rc = SCSI_MLQUEUE_HOST_BUSY;
}
return rc;
}
static DEF_SCSI_QCMD(pmcraid_queuecommand)
/**
* pmcraid_open -char node "open" entry, allowed only users with admin access
*/
static int pmcraid_chr_open(struct inode *inode, struct file *filep)
{
struct pmcraid_instance *pinstance;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/* Populate adapter instance * pointer for use by ioctl */
pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
filep->private_data = pinstance;
return 0;
}
/**
* pmcraid_release - char node "release" entry point
*/
static int pmcraid_chr_release(struct inode *inode, struct file *filep)
{
struct pmcraid_instance *pinstance = filep->private_data;
filep->private_data = NULL;
fasync_helper(-1, filep, 0, &pinstance->aen_queue);
return 0;
}
/**
* pmcraid_fasync - Async notifier registration from applications
*
* This function adds the calling process to a driver global queue. When an
* event occurs, SIGIO will be sent to all processes in this queue.
*/
static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
{
struct pmcraid_instance *pinstance;
int rc;
pinstance = filep->private_data;
mutex_lock(&pinstance->aen_queue_lock);
rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
mutex_unlock(&pinstance->aen_queue_lock);
return rc;
}
/**
* pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
* commands sent over IOCTL interface
*
* @cmd : pointer to struct pmcraid_cmd
* @buflen : length of the request buffer
* @direction : data transfer direction
*
* Return value
* 0 on success, non-zero error code on failure
*/
static int pmcraid_build_passthrough_ioadls(
struct pmcraid_cmd *cmd,
int buflen,
int direction
)
{
struct pmcraid_sglist *sglist = NULL;
struct scatterlist *sg = NULL;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl;
int i;
sglist = pmcraid_alloc_sglist(buflen);
if (!sglist) {
pmcraid_err("can't allocate memory for passthrough SGls\n");
return -ENOMEM;
}
sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
sglist->scatterlist,
sglist->num_sg, direction);
if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
dev_err(&cmd->drv_inst->pdev->dev,
"Failed to map passthrough buffer!\n");
pmcraid_free_sglist(sglist);
return -EIO;
}
cmd->sglist = sglist;
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
/* Initialize IOADL descriptor addresses */
for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
ioadl[i].flags = 0;
}
/* setup the last descriptor */
ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
return 0;
}
/**
* pmcraid_release_passthrough_ioadls - release passthrough ioadls
*
* @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
* @buflen: size of the request buffer
* @direction: data transfer direction
*
* Return value
* 0 on success, non-zero error code on failure
*/
static void pmcraid_release_passthrough_ioadls(
struct pmcraid_cmd *cmd,
int buflen,
int direction
)
{
struct pmcraid_sglist *sglist = cmd->sglist;
if (buflen > 0) {
pci_unmap_sg(cmd->drv_inst->pdev,
sglist->scatterlist,
sglist->num_sg,
direction);
pmcraid_free_sglist(sglist);
cmd->sglist = NULL;
}
}
/**
* pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
*
* @pinstance: pointer to adapter instance structure
* @cmd: ioctl code
* @arg: pointer to pmcraid_passthrough_buffer user buffer
*
* Return value
* 0 on success, non-zero error code on failure
*/
static long pmcraid_ioctl_passthrough(
struct pmcraid_instance *pinstance,
unsigned int ioctl_cmd,
unsigned int buflen,
unsigned long arg
)
{
struct pmcraid_passthrough_ioctl_buffer *buffer;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cancel_cmd;
unsigned long request_buffer;
unsigned long request_offset;
unsigned long lock_flags;
void *ioasa;
u32 ioasc;
int request_size;
int buffer_size;
u8 access, direction;
int rc = 0;
/* If IOA reset is in progress, wait 10 secs for reset to complete */
if (pinstance->ioa_reset_in_progress) {
rc = wait_event_interruptible_timeout(
pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress,
msecs_to_jiffies(10000));
if (!rc)
return -ETIMEDOUT;
else if (rc < 0)
return -ERESTARTSYS;
}
/* If adapter is not in operational state, return error */
if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
pmcraid_err("IOA is not operational\n");
return -ENOTTY;
}
buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
pmcraid_err("no memory for passthrough buffer\n");
return -ENOMEM;
}
request_offset =
offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
request_buffer = arg + request_offset;
rc = __copy_from_user(buffer,
(struct pmcraid_passthrough_ioctl_buffer *) arg,
sizeof(struct pmcraid_passthrough_ioctl_buffer));
ioasa =
(void *)(arg +
offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
if (rc) {
pmcraid_err("ioctl: can't copy passthrough buffer\n");
rc = -EFAULT;
goto out_free_buffer;
}
request_size = buffer->ioarcb.data_transfer_length;
if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
access = VERIFY_READ;
direction = DMA_TO_DEVICE;
} else {
access = VERIFY_WRITE;
direction = DMA_FROM_DEVICE;
}
if (request_size > 0) {
rc = access_ok(access, arg, request_offset + request_size);
if (!rc) {
rc = -EFAULT;
goto out_free_buffer;
}
} else if (request_size < 0) {
rc = -EINVAL;
goto out_free_buffer;
}
/* check if we have any additional command parameters */
if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
rc = -EINVAL;
goto out_free_buffer;
}
cmd = pmcraid_get_free_cmd(pinstance);
if (!cmd) {
pmcraid_err("free command block is not available\n");
rc = -ENOMEM;
goto out_free_buffer;
}
cmd->scsi_cmd = NULL;
ioarcb = &(cmd->ioa_cb->ioarcb);
/* Copy the user-provided IOARCB stuff field by field */
ioarcb->resource_handle = buffer->ioarcb.resource_handle;
ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
ioarcb->request_type = buffer->ioarcb.request_type;
ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
if (buffer->ioarcb.add_cmd_param_length) {
ioarcb->add_cmd_param_length =
buffer->ioarcb.add_cmd_param_length;
ioarcb->add_cmd_param_offset =
buffer->ioarcb.add_cmd_param_offset;
memcpy(ioarcb->add_data.u.add_cmd_params,
buffer->ioarcb.add_data.u.add_cmd_params,
buffer->ioarcb.add_cmd_param_length);
}
/* set hrrq number where the IOA should respond to. Note that all cmds
* generated internally uses hrrq_id 0, exception to this is the cmd
* block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
* hrrq_id assigned here in queuecommand
*/
ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
pinstance->num_hrrq;
if (request_size) {
rc = pmcraid_build_passthrough_ioadls(cmd,
request_size,
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
goto out_free_buffer;
}
}
/* If data is being written into the device, copy the data from user
* buffers
*/
if (direction == DMA_TO_DEVICE && request_size > 0) {
rc = pmcraid_copy_sglist(cmd->sglist,
request_buffer,
request_size,
direction);
if (rc) {
pmcraid_err("failed to copy user buffer\n");
goto out_free_sglist;
}
}
/* passthrough ioctl is a blocking command so, put the user to sleep
* until timeout. Note that a timeout value of 0 means, do timeout.
*/
cmd->cmd_done = pmcraid_internal_done;
init_completion(&cmd->wait_for_completion);
cmd->completion_req = 1;
pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
_pmcraid_fire_command(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
/* NOTE ! Remove the below line once abort_task is implemented
* in firmware. This line disables ioctl command timeout handling logic
* similar to IO command timeout handling, making ioctl commands to wait
* until the command completion regardless of timeout value specified in
* ioarcb
*/
buffer->ioarcb.cmd_timeout = 0;
/* If command timeout is specified put caller to wait till that time,
* otherwise it would be blocking wait. If command gets timed out, it
* will be aborted.
*/
if (buffer->ioarcb.cmd_timeout == 0) {
wait_for_completion(&cmd->wait_for_completion);
} else if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
cmd->ioa_cb->ioarcb.cdb[0]);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
cancel_cmd = pmcraid_abort_cmd(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
if (cancel_cmd) {
wait_for_completion(&cancel_cmd->wait_for_completion);
ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
pmcraid_return_cmd(cancel_cmd);
/* if abort task couldn't find the command i.e it got
* completed prior to aborting, return good completion.
* if command got aborted successfully or there was IOA
* reset due to abort task itself getting timedout then
* return -ETIMEDOUT
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
rc = -ETIMEDOUT;
goto out_handle_response;
}
}
/* no command block for abort task or abort task failed to abort
* the IOARCB, then wait for 150 more seconds and initiate reset
* sequence after timeout
*/
if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
msecs_to_jiffies(150 * 1000))) {
pmcraid_reset_bringup(cmd->drv_inst);
rc = -ETIMEDOUT;
}
}
out_handle_response:
/* copy entire IOASA buffer and return IOCTL success.
* If copying IOASA to user-buffer fails, return
* EFAULT
*/
if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
sizeof(struct pmcraid_ioasa))) {
pmcraid_err("failed to copy ioasa buffer to user\n");
rc = -EFAULT;
}
/* If the data transfer was from device, copy the data onto user
* buffers
*/
else if (direction == DMA_FROM_DEVICE && request_size > 0) {
rc = pmcraid_copy_sglist(cmd->sglist,
request_buffer,
request_size,
direction);
if (rc) {
pmcraid_err("failed to copy user buffer\n");
rc = -EFAULT;
}
}
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
pmcraid_return_cmd(cmd);
out_free_buffer:
kfree(buffer);
return rc;
}
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
* @pinstance: pointer to adapter instance structure
* @cmd: ioctl command passed in
* @buflen: length of user_buffer
* @user_buffer: user buffer pointer
*
* Return Value
* 0 in case of success, otherwise appropriate error code
*/
static long pmcraid_ioctl_driver(
struct pmcraid_instance *pinstance,
unsigned int cmd,
unsigned int buflen,
void __user *user_buffer
)
{
int rc = -ENOSYS;
if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
pmcraid_err("ioctl_driver: access fault in request buffer\n");
return -EFAULT;
}
switch (cmd) {
case PMCRAID_IOCTL_RESET_ADAPTER:
pmcraid_reset_bringup(pinstance);
rc = 0;
break;
default:
break;
}
return rc;
}
/**
* pmcraid_check_ioctl_buffer - check for proper access to user buffer
*
* @cmd: ioctl command
* @arg: user buffer
* @hdr: pointer to kernel memory for pmcraid_ioctl_header
*
* Return Value
* negetive error code if there are access issues, otherwise zero.
* Upon success, returns ioctl header copied out of user buffer.
*/
static int pmcraid_check_ioctl_buffer(
int cmd,
void __user *arg,
struct pmcraid_ioctl_header *hdr
)
{
int rc = 0;
int access = VERIFY_READ;
if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
pmcraid_err("couldn't copy ioctl header from user buffer\n");
return -EFAULT;
}
/* check for valid driver signature */
rc = memcmp(hdr->signature,
PMCRAID_IOCTL_SIGNATURE,
sizeof(hdr->signature));
if (rc) {
pmcraid_err("signature verification failed\n");
return -EINVAL;
}
/* check for appropriate buffer access */
if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
access = VERIFY_WRITE;
rc = access_ok(access,
(arg + sizeof(struct pmcraid_ioctl_header)),
hdr->buffer_length);
if (!rc) {
pmcraid_err("access failed for user buffer of size %d\n",
hdr->buffer_length);
return -EFAULT;
}
return 0;
}
/**
* pmcraid_ioctl - char node ioctl entry point
*/
static long pmcraid_chr_ioctl(
struct file *filep,
unsigned int cmd,
unsigned long arg
)
{
struct pmcraid_instance *pinstance = NULL;
struct pmcraid_ioctl_header *hdr = NULL;
int retval = -ENOTTY;
hdr = kmalloc(GFP_KERNEL, sizeof(struct pmcraid_ioctl_header));
if (!hdr) {
pmcraid_err("faile to allocate memory for ioctl header\n");
return -ENOMEM;
}
retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr);
if (retval) {
pmcraid_info("chr_ioctl: header check failed\n");
kfree(hdr);
return retval;
}
pinstance = filep->private_data;
if (!pinstance) {
pmcraid_info("adapter instance is not found\n");
kfree(hdr);
return -ENOTTY;
}
switch (_IOC_TYPE(cmd)) {
case PMCRAID_PASSTHROUGH_IOCTL:
/* If ioctl code is to download microcode, we need to block
* mid-layer requests.
*/
if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
scsi_block_requests(pinstance->host);
retval = pmcraid_ioctl_passthrough(pinstance,
cmd,
hdr->buffer_length,
arg);
if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
scsi_unblock_requests(pinstance->host);
break;
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance,
cmd,
hdr->buffer_length,
(void __user *)arg);
break;
default:
retval = -ENOTTY;
break;
}
kfree(hdr);
return retval;
}
/**
* File operations structure for management interface
*/
static const struct file_operations pmcraid_fops = {
.owner = THIS_MODULE,
.open = pmcraid_chr_open,
.release = pmcraid_chr_release,
.fasync = pmcraid_chr_fasync,
.unlocked_ioctl = pmcraid_chr_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = pmcraid_chr_ioctl,
#endif
.llseek = noop_llseek,
};
/**
* pmcraid_show_log_level - Display adapter's error logging level
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_show_log_level(
struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
}
/**
* pmcraid_store_log_level - Change the adapter's error logging level
* @dev: class device struct
* @buf: buffer
* @count: not used
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_store_log_level(
struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count
)
{
struct Scsi_Host *shost;
struct pmcraid_instance *pinstance;
unsigned long val;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
/* log-level should be from 0 to 2 */
if (val > 2)
return -EINVAL;
shost = class_to_shost(dev);
pinstance = (struct pmcraid_instance *)shost->hostdata;
pinstance->current_log_level = val;
return strlen(buf);
}
static struct device_attribute pmcraid_log_level_attr = {
.attr = {
.name = "log_level",
.mode = S_IRUGO | S_IWUSR,
},
.show = pmcraid_show_log_level,
.store = pmcraid_store_log_level,
};
/**
* pmcraid_show_drv_version - Display driver version
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_show_drv_version(
struct device *dev,
struct device_attribute *attr,
char *buf
)
{
return snprintf(buf, PAGE_SIZE, "version: %s\n",
PMCRAID_DRIVER_VERSION);
}
static struct device_attribute pmcraid_driver_version_attr = {
.attr = {
.name = "drv_version",
.mode = S_IRUGO,
},
.show = pmcraid_show_drv_version,
};
/**
* pmcraid_show_io_adapter_id - Display driver assigned adapter id
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_show_adapter_id(
struct device *dev,
struct device_attribute *attr,
char *buf
)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
u32 adapter_id = (pinstance->pdev->bus->number << 8) |
pinstance->pdev->devfn;
u32 aen_group = pmcraid_event_family.id;
return snprintf(buf, PAGE_SIZE,
"adapter id: %d\nminor: %d\naen group: %d\n",
adapter_id, MINOR(pinstance->cdev.dev), aen_group);
}
static struct device_attribute pmcraid_adapter_id_attr = {
.attr = {
.name = "adapter_id",
.mode = S_IRUGO | S_IWUSR,
},
.show = pmcraid_show_adapter_id,
};
static struct device_attribute *pmcraid_host_attrs[] = {
&pmcraid_log_level_attr,
&pmcraid_driver_version_attr,
&pmcraid_adapter_id_attr,
NULL,
};
/* host template structure for pmcraid driver */
static struct scsi_host_template pmcraid_host_template = {
.module = THIS_MODULE,
.name = PMCRAID_DRIVER_NAME,
.queuecommand = pmcraid_queuecommand,
.eh_abort_handler = pmcraid_eh_abort_handler,
.eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
.eh_target_reset_handler = pmcraid_eh_target_reset_handler,
.eh_device_reset_handler = pmcraid_eh_device_reset_handler,
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
.slave_alloc = pmcraid_slave_alloc,
.slave_configure = pmcraid_slave_configure,
.slave_destroy = pmcraid_slave_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.change_queue_type = pmcraid_change_queue_type,
.can_queue = PMCRAID_MAX_IO_CMD,
.this_id = -1,
.sg_tablesize = PMCRAID_MAX_IOADLS,
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = pmcraid_host_attrs,
.proc_name = PMCRAID_DRIVER_NAME
};
/*
* pmcraid_isr_msix - implements MSI-X interrupt handling routine
* @irq: interrupt vector number
* @dev_id: pointer hrrq_vector
*
* Return Value
* IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
*/
static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
{
struct pmcraid_isr_param *hrrq_vector;
struct pmcraid_instance *pinstance;
unsigned long lock_flags;
u32 intrs_val;
int hrrq_id;
hrrq_vector = (struct pmcraid_isr_param *)dev_id;
hrrq_id = hrrq_vector->hrrq_id;
pinstance = hrrq_vector->drv_inst;
if (!hrrq_id) {
/* Read the interrupt */
intrs_val = pmcraid_read_interrupts(pinstance);
if (intrs_val &&
((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
& DOORBELL_INTR_MSIX_CLR) == 0)) {
/* Any error interrupts including unit_check,
* initiate IOA reset.In case of unit check indicate
* to reset_sequence that IOA unit checked and prepare
* for a dump during reset sequence
*/
if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
if (intrs_val & INTRS_IOA_UNIT_CHECK)
pinstance->ioa_unit_check = 1;
pmcraid_err("ISR: error interrupts: %x \
initiating reset\n", intrs_val);
spin_lock_irqsave(pinstance->host->host_lock,
lock_flags);
pmcraid_initiate_reset(pinstance);
spin_unlock_irqrestore(
pinstance->host->host_lock,
lock_flags);
}
/* If interrupt was as part of the ioa initialization,
* clear it. Delete the timer and wakeup the
* reset engine to proceed with reset sequence
*/
if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
pmcraid_clr_trans_op(pinstance);
/* Clear the interrupt register by writing
* to host to ioa doorbell. Once done
* FW will clear the interrupt.
*/
iowrite32(DOORBELL_INTR_MSIX_CLR,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
}
}
tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
return IRQ_HANDLED;
}
/**
* pmcraid_isr - implements legacy interrupt handling routine
*
* @irq: interrupt vector number
* @dev_id: pointer hrrq_vector
*
* Return Value
* IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
*/
static irqreturn_t pmcraid_isr(int irq, void *dev_id)
{
struct pmcraid_isr_param *hrrq_vector;
struct pmcraid_instance *pinstance;
u32 intrs;
unsigned long lock_flags;
int hrrq_id = 0;
/* In case of legacy interrupt mode where interrupts are shared across
* isrs, it may be possible that the current interrupt is not from IOA
*/
if (!dev_id) {
printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
return IRQ_NONE;
}
hrrq_vector = (struct pmcraid_isr_param *)dev_id;
pinstance = hrrq_vector->drv_inst;
intrs = pmcraid_read_interrupts(pinstance);
if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
return IRQ_NONE;
/* Any error interrupts including unit_check, initiate IOA reset.
* In case of unit check indicate to reset_sequence that IOA unit
* checked and prepare for a dump during reset sequence
*/
if (intrs & PMCRAID_ERROR_INTERRUPTS) {
if (intrs & INTRS_IOA_UNIT_CHECK)
pinstance->ioa_unit_check = 1;
iowrite32(intrs,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
pmcraid_err("ISR: error interrupts: %x initiating reset\n",
intrs);
intrs = ioread32(
pinstance->int_regs.ioa_host_interrupt_clr_reg);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_initiate_reset(pinstance);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
/* If interrupt was as part of the ioa initialization,
* clear. Delete the timer and wakeup the
* reset engine to proceed with reset sequence
*/
if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
pmcraid_clr_trans_op(pinstance);
} else {
iowrite32(intrs,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
ioread32(
pinstance->int_regs.ioa_host_interrupt_clr_reg);
tasklet_schedule(
&(pinstance->isr_tasklet[hrrq_id]));
}
}
return IRQ_HANDLED;
}
/**
* pmcraid_worker_function - worker thread function
*
* @workp: pointer to struct work queue
*
* Return Value
* None
*/
static void pmcraid_worker_function(struct work_struct *workp)
{
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
struct pmcraid_resource_entry *temp;
struct scsi_device *sdev;
unsigned long lock_flags;
unsigned long host_lock_flags;
u16 fw_version;
u8 bus, target, lun;
pinstance = container_of(workp, struct pmcraid_instance, worker_q);
/* add resources only after host is added into system */
if (!atomic_read(&pinstance->expose_resources))
return;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
sdev = res->scsi_dev;
/* host_lock must be held before calling
* scsi_device_get
*/
spin_lock_irqsave(pinstance->host->host_lock,
host_lock_flags);
if (!scsi_device_get(sdev)) {
spin_unlock_irqrestore(
pinstance->host->host_lock,
host_lock_flags);
pmcraid_info("deleting %x from midlayer\n",
res->cfg_entry.resource_address);
list_move_tail(&res->queue,
&pinstance->free_res_q);
spin_unlock_irqrestore(
&pinstance->resource_lock,
lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(&pinstance->resource_lock,
lock_flags);
res->change_detected = 0;
} else {
spin_unlock_irqrestore(
pinstance->host->host_lock,
host_lock_flags);
}
}
}
list_for_each_entry(res, &pinstance->used_res_q, queue) {
if (res->change_detected == RES_CHANGE_ADD) {
if (!pmcraid_expose_resource(fw_version,
&res->cfg_entry))
continue;
if (RES_IS_VSET(res->cfg_entry)) {
bus = PMCRAID_VSET_BUS_ID;
if (fw_version <= PMCRAID_FW_VERSION_1)
target = res->cfg_entry.unique_flags1;
else
target = res->cfg_entry.array_id & 0xFF;
lun = PMCRAID_VSET_LUN_ID;
} else {
bus = PMCRAID_PHYS_BUS_ID;
target =
RES_TARGET(
res->cfg_entry.resource_address);
lun = RES_LUN(res->cfg_entry.resource_address);
}
res->change_detected = 0;
spin_unlock_irqrestore(&pinstance->resource_lock,
lock_flags);
scsi_add_device(pinstance->host, bus, target, lun);
spin_lock_irqsave(&pinstance->resource_lock,
lock_flags);
}
}
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
}
/**
* pmcraid_tasklet_function - Tasklet function
*
* @instance: pointer to msix param structure
*
* Return Value
* None
*/
static void pmcraid_tasklet_function(unsigned long instance)
{
struct pmcraid_isr_param *hrrq_vector;
struct pmcraid_instance *pinstance;
unsigned long hrrq_lock_flags;
unsigned long pending_lock_flags;
unsigned long host_lock_flags;
spinlock_t *lockp; /* hrrq buffer lock */
int id;
__le32 resp;
hrrq_vector = (struct pmcraid_isr_param *)instance;
pinstance = hrrq_vector->drv_inst;
id = hrrq_vector->hrrq_id;
lockp = &(pinstance->hrrq_lock[id]);
/* loop through each of the commands responded by IOA. Each HRRQ buf is
* protected by its own lock. Traversals must be done within this lock
* as there may be multiple tasklets running on multiple CPUs. Note
* that the lock is held just for picking up the response handle and
* manipulating hrrq_curr/toggle_bit values.
*/
spin_lock_irqsave(lockp, hrrq_lock_flags);
resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
while ((resp & HRRQ_TOGGLE_BIT) ==
pinstance->host_toggle_bit[id]) {
int cmd_index = resp >> 2;
struct pmcraid_cmd *cmd = NULL;
if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
pinstance->hrrq_curr[id]++;
} else {
pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
pinstance->host_toggle_bit[id] ^= 1u;
}
if (cmd_index >= PMCRAID_MAX_CMD) {
/* In case of invalid response handle, log message */
pmcraid_err("Invalid response handle %d\n", cmd_index);
resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
continue;
}
cmd = pinstance->cmd_list[cmd_index];
spin_unlock_irqrestore(lockp, hrrq_lock_flags);
spin_lock_irqsave(&pinstance->pending_pool_lock,
pending_lock_flags);
list_del(&cmd->free_list);
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
pending_lock_flags);
del_timer(&cmd->timer);
atomic_dec(&pinstance->outstanding_cmds);
if (cmd->cmd_done == pmcraid_ioa_reset) {
spin_lock_irqsave(pinstance->host->host_lock,
host_lock_flags);
cmd->cmd_done(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
} else if (cmd->cmd_done != NULL) {
cmd->cmd_done(cmd);
}
/* loop over until we are done with all responses */
spin_lock_irqsave(lockp, hrrq_lock_flags);
resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
}
spin_unlock_irqrestore(lockp, hrrq_lock_flags);
}
/**
* pmcraid_unregister_interrupt_handler - de-register interrupts handlers
* @pinstance: pointer to adapter instance structure
*
* This routine un-registers registered interrupt handler and
* also frees irqs/vectors.
*
* Retun Value
* None
*/
static
void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
{
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
free_irq(pinstance->hrrq_vector[i].vector,
&(pinstance->hrrq_vector[i]));
if (pinstance->interrupt_mode) {
pci_disable_msix(pinstance->pdev);
pinstance->interrupt_mode = 0;
}
}
/**
* pmcraid_register_interrupt_handler - registers interrupt handler
* @pinstance: pointer to per-adapter instance structure
*
* Return Value
* 0 on success, non-zero error code otherwise.
*/
static int
pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
{
int rc;
struct pci_dev *pdev = pinstance->pdev;
if ((pmcraid_enable_msix) &&
(pci_find_capability(pdev, PCI_CAP_ID_MSIX))) {
int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
int i;
for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
entries[i].entry = i;
rc = pci_enable_msix(pdev, entries, num_hrrq);
if (rc < 0)
goto pmcraid_isr_legacy;
/* Check how many MSIX vectors are allocated and register
* msi-x handlers for each of them giving appropriate buffer
*/
if (rc > 0) {
num_hrrq = rc;
if (pci_enable_msix(pdev, entries, num_hrrq))
goto pmcraid_isr_legacy;
}
for (i = 0; i < num_hrrq; i++) {
pinstance->hrrq_vector[i].hrrq_id = i;
pinstance->hrrq_vector[i].drv_inst = pinstance;
pinstance->hrrq_vector[i].vector = entries[i].vector;
rc = request_irq(pinstance->hrrq_vector[i].vector,
pmcraid_isr_msix, 0,
PMCRAID_DRIVER_NAME,
&(pinstance->hrrq_vector[i]));
if (rc) {
int j;
for (j = 0; j < i; j++)
free_irq(entries[j].vector,
&(pinstance->hrrq_vector[j]));
pci_disable_msix(pdev);
goto pmcraid_isr_legacy;
}
}
pinstance->num_hrrq = num_hrrq;
pinstance->interrupt_mode = 1;
iowrite32(DOORBELL_INTR_MODE_MSIX,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
goto pmcraid_isr_out;
}
pmcraid_isr_legacy:
/* If MSI-X registration failed fallback to legacy mode, where
* only one hrrq entry will be used
*/
pinstance->hrrq_vector[0].hrrq_id = 0;
pinstance->hrrq_vector[0].drv_inst = pinstance;
pinstance->hrrq_vector[0].vector = pdev->irq;
pinstance->num_hrrq = 1;
rc = 0;
rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
pmcraid_isr_out:
return rc;
}
/**
* pmcraid_release_cmd_blocks - release buufers allocated for command blocks
* @pinstance: per adapter instance structure pointer
* @max_index: number of buffer blocks to release
*
* Return Value
* None
*/
static void
pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
{
int i;
for (i = 0; i < max_index; i++) {
kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
pinstance->cmd_list[i] = NULL;
}
kmem_cache_destroy(pinstance->cmd_cachep);
pinstance->cmd_cachep = NULL;
}
/**
* pmcraid_release_control_blocks - releases buffers alloced for control blocks
* @pinstance: pointer to per adapter instance structure
* @max_index: number of buffers (from 0 onwards) to release
*
* This function assumes that the command blocks for which control blocks are
* linked are not released.
*
* Return Value
* None
*/
static void
pmcraid_release_control_blocks(
struct pmcraid_instance *pinstance,
int max_index
)
{
int i;
if (pinstance->control_pool == NULL)
return;
for (i = 0; i < max_index; i++) {
pci_pool_free(pinstance->control_pool,
pinstance->cmd_list[i]->ioa_cb,
pinstance->cmd_list[i]->ioa_cb_bus_addr);
pinstance->cmd_list[i]->ioa_cb = NULL;
pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
}
pci_pool_destroy(pinstance->control_pool);
pinstance->control_pool = NULL;
}
/**
* pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
* @pinstance - pointer to per adapter instance structure
*
* Allocates memory for command blocks using kernel slab allocator.
*
* Return Value
* 0 in case of success; -ENOMEM in case of failure
*/
static int __devinit
pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
{
int i;
sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
pinstance->host->unique_id);
pinstance->cmd_cachep = kmem_cache_create(
pinstance->cmd_pool_name,
sizeof(struct pmcraid_cmd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!pinstance->cmd_cachep)
return -ENOMEM;
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i] =
kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
if (!pinstance->cmd_list[i]) {
pmcraid_release_cmd_blocks(pinstance, i);
return -ENOMEM;
}
}
return 0;
}
/**
* pmcraid_allocate_control_blocks - allocates memory control blocks
* @pinstance : pointer to per adapter instance structure
*
* This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
* and IOASAs. This is called after command blocks are already allocated.
*
* Return Value
* 0 in case it can allocate all control blocks, otherwise -ENOMEM
*/
static int __devinit
pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
{
int i;
sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
pinstance->host->unique_id);
pinstance->control_pool =
pci_pool_create(pinstance->ctl_pool_name,
pinstance->pdev,
sizeof(struct pmcraid_control_block),
PMCRAID_IOARCB_ALIGNMENT, 0);
if (!pinstance->control_pool)
return -ENOMEM;
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb =
pci_pool_alloc(
pinstance->control_pool,
GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
if (!pinstance->cmd_list[i]->ioa_cb) {
pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM;
}
memset(pinstance->cmd_list[i]->ioa_cb, 0,
sizeof(struct pmcraid_control_block));
}
return 0;
}
/**
* pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
* @pinstance: pointer to per adapter instance structure
* @maxindex: size of hrrq buffer pointer array
*
* Return Value
* None
*/
static void
pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
{
int i;
for (i = 0; i < maxindex; i++) {
pci_free_consistent(pinstance->pdev,
HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
pinstance->hrrq_start[i],
pinstance->hrrq_start_bus_addr[i]);
/* reset pointers and toggle bit to zeros */
pinstance->hrrq_start[i] = NULL;
pinstance->hrrq_start_bus_addr[i] = 0;
pinstance->host_toggle_bit[i] = 0;
}
}
/**
* pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
* @pinstance: pointer to per adapter instance structure
*
* Return value
* 0 hrrq buffers are allocated, -ENOMEM otherwise.
*/
static int __devinit
pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
{
int i, buffer_size;
buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
for (i = 0; i < pinstance->num_hrrq; i++) {
pinstance->hrrq_start[i] =
pci_alloc_consistent(
pinstance->pdev,
buffer_size,
&(pinstance->hrrq_start_bus_addr[i]));
if (pinstance->hrrq_start[i] == 0) {
pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
i);
pmcraid_release_host_rrqs(pinstance, i);
return -ENOMEM;
}
memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
pinstance->host_toggle_bit[i] = 1;
spin_lock_init(&pinstance->hrrq_lock[i]);
}
return 0;
}
/**
* pmcraid_release_hcams - release HCAM buffers
*
* @pinstance: pointer to per adapter instance structure
*
* Return value
* none
*/
static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
{
if (pinstance->ccn.msg != NULL) {
pci_free_consistent(pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
pinstance->ccn.msg,
pinstance->ccn.baddr);
pinstance->ccn.msg = NULL;
pinstance->ccn.hcam = NULL;
pinstance->ccn.baddr = 0;
}
if (pinstance->ldn.msg != NULL) {
pci_free_consistent(pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
pinstance->ldn.msg,
pinstance->ldn.baddr);
pinstance->ldn.msg = NULL;
pinstance->ldn.hcam = NULL;
pinstance->ldn.baddr = 0;
}
}
/**
* pmcraid_allocate_hcams - allocates HCAM buffers
* @pinstance : pointer to per adapter instance structure
*
* Return Value:
* 0 in case of successful allocation, non-zero otherwise
*/
static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
{
pinstance->ccn.msg = pci_alloc_consistent(
pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
&(pinstance->ccn.baddr));
pinstance->ldn.msg = pci_alloc_consistent(
pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
&(pinstance->ldn.baddr));
if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
pmcraid_release_hcams(pinstance);
} else {
pinstance->ccn.hcam =
(void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
pinstance->ldn.hcam =
(void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
atomic_set(&pinstance->ccn.ignore, 0);
atomic_set(&pinstance->ldn.ignore, 0);
}
return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
}
/**
* pmcraid_release_config_buffers - release config.table buffers
* @pinstance: pointer to per adapter instance structure
*
* Return Value
* none
*/
static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
{
if (pinstance->cfg_table != NULL &&
pinstance->cfg_table_bus_addr != 0) {
pci_free_consistent(pinstance->pdev,
sizeof(struct pmcraid_config_table),
pinstance->cfg_table,
pinstance->cfg_table_bus_addr);
pinstance->cfg_table = NULL;
pinstance->cfg_table_bus_addr = 0;
}
if (pinstance->res_entries != NULL) {
int i;
for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
list_del(&pinstance->res_entries[i].queue);
kfree(pinstance->res_entries);
pinstance->res_entries = NULL;
}
pmcraid_release_hcams(pinstance);
}
/**
* pmcraid_allocate_config_buffers - allocates DMAable memory for config table
* @pinstance : pointer to per adapter instance structure
*
* Return Value
* 0 for successful allocation, -ENOMEM for any failure
*/
static int __devinit
pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
{
int i;
pinstance->res_entries =
kzalloc(sizeof(struct pmcraid_resource_entry) *
PMCRAID_MAX_RESOURCES, GFP_KERNEL);
if (NULL == pinstance->res_entries) {
pmcraid_err("failed to allocate memory for resource table\n");
return -ENOMEM;
}
for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
list_add_tail(&pinstance->res_entries[i].queue,
&pinstance->free_res_q);
pinstance->cfg_table =
pci_alloc_consistent(pinstance->pdev,
sizeof(struct pmcraid_config_table),
&pinstance->cfg_table_bus_addr);
if (NULL == pinstance->cfg_table) {
pmcraid_err("couldn't alloc DMA memory for config table\n");
pmcraid_release_config_buffers(pinstance);
return -ENOMEM;
}
if (pmcraid_allocate_hcams(pinstance)) {
pmcraid_err("could not alloc DMA memory for HCAMS\n");
pmcraid_release_config_buffers(pinstance);
return -ENOMEM;
}
return 0;
}
/**
* pmcraid_init_tasklets - registers tasklets for response handling
*
* @pinstance: pointer adapter instance structure
*
* Return value
* none
*/
static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
{
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
tasklet_init(&pinstance->isr_tasklet[i],
pmcraid_tasklet_function,
(unsigned long)&pinstance->hrrq_vector[i]);
}
/**
* pmcraid_kill_tasklets - destroys tasklets registered for response handling
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* none
*/
static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
{
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
tasklet_kill(&pinstance->isr_tasklet[i]);
}
/**
* pmcraid_release_buffers - release per-adapter buffers allocated
*
* @pinstance: pointer to adapter soft state
*
* Return Value
* none
*/
static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
{
pmcraid_release_config_buffers(pinstance);
pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
if (pinstance->inq_data != NULL) {
pci_free_consistent(pinstance->pdev,
sizeof(struct pmcraid_inquiry_data),
pinstance->inq_data,
pinstance->inq_data_baddr);
pinstance->inq_data = NULL;
pinstance->inq_data_baddr = 0;
}
if (pinstance->timestamp_data != NULL) {
pci_free_consistent(pinstance->pdev,
sizeof(struct pmcraid_timestamp_data),
pinstance->timestamp_data,
pinstance->timestamp_data_baddr);
pinstance->timestamp_data = NULL;
pinstance->timestamp_data_baddr = 0;
}
}
/**
* pmcraid_init_buffers - allocates memory and initializes various structures
* @pinstance: pointer to per adapter instance structure
*
* This routine pre-allocates memory based on the type of block as below:
* cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
* IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
* config-table entries : DMAable memory using pci_alloc_consistent
* HostRRQs : DMAable memory, using pci_alloc_consistent
*
* Return Value
* 0 in case all of the blocks are allocated, -ENOMEM otherwise.
*/
static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
{
int i;
if (pmcraid_allocate_host_rrqs(pinstance)) {
pmcraid_err("couldn't allocate memory for %d host rrqs\n",
pinstance->num_hrrq);
return -ENOMEM;
}
if (pmcraid_allocate_config_buffers(pinstance)) {
pmcraid_err("couldn't allocate memory for config buffers\n");
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
return -ENOMEM;
}
if (pmcraid_allocate_cmd_blocks(pinstance)) {
pmcraid_err("couldn't allocate memory for cmd blocks\n");
pmcraid_release_config_buffers(pinstance);
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
return -ENOMEM;
}
if (pmcraid_allocate_control_blocks(pinstance)) {
pmcraid_err("couldn't allocate memory control blocks\n");
pmcraid_release_config_buffers(pinstance);
pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
return -ENOMEM;
}
/* allocate DMAable memory for page D0 INQUIRY buffer */
pinstance->inq_data = pci_alloc_consistent(
pinstance->pdev,
sizeof(struct pmcraid_inquiry_data),
&pinstance->inq_data_baddr);
if (pinstance->inq_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
pmcraid_release_buffers(pinstance);
return -ENOMEM;
}
/* allocate DMAable memory for set timestamp data buffer */
pinstance->timestamp_data = pci_alloc_consistent(
pinstance->pdev,
sizeof(struct pmcraid_timestamp_data),
&pinstance->timestamp_data_baddr);
if (pinstance->timestamp_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for \
set time_stamp \n");
pmcraid_release_buffers(pinstance);
return -ENOMEM;
}
/* Initialize all the command blocks and add them to free pool. No
* need to lock (free_pool_lock) as this is done in initialization
* itself
*/
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
pmcraid_init_cmdblk(cmdp, i);
cmdp->drv_inst = pinstance;
list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
}
return 0;
}
/**
* pmcraid_reinit_buffers - resets various buffer pointers
* @pinstance: pointer to adapter instance
* Return value
* none
*/
static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
{
int i;
int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
for (i = 0; i < pinstance->num_hrrq; i++) {
memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
pinstance->host_toggle_bit[i] = 1;
}
}
/**
* pmcraid_init_instance - initialize per instance data structure
* @pdev: pointer to pci device structure
* @host: pointer to Scsi_Host structure
* @mapped_pci_addr: memory mapped IOA configuration registers
*
* Return Value
* 0 on success, non-zero in case of any failure
*/
static int __devinit pmcraid_init_instance(
struct pci_dev *pdev,
struct Scsi_Host *host,
void __iomem *mapped_pci_addr
)
{
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)host->hostdata;
pinstance->host = host;
pinstance->pdev = pdev;
/* Initialize register addresses */
pinstance->mapped_dma_addr = mapped_pci_addr;
/* Initialize chip-specific details */
{
struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
pint_regs->ioa_host_interrupt_reg =
mapped_pci_addr + chip_cfg->ioa_host_intr;
pint_regs->ioa_host_interrupt_clr_reg =
mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
pint_regs->ioa_host_msix_interrupt_reg =
mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
pint_regs->host_ioa_interrupt_reg =
mapped_pci_addr + chip_cfg->host_ioa_intr;
pint_regs->host_ioa_interrupt_clr_reg =
mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
/* Current version of firmware exposes interrupt mask set
* and mask clr registers through memory mapped bar0.
*/
pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
pint_regs->ioa_host_interrupt_mask_reg =
mapped_pci_addr + chip_cfg->ioa_host_mask;
pint_regs->ioa_host_interrupt_mask_clr_reg =
mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
pint_regs->global_interrupt_mask_reg =
mapped_pci_addr + chip_cfg->global_intr_mask;
};
pinstance->ioa_reset_attempts = 0;
init_waitqueue_head(&pinstance->reset_wait_q);
atomic_set(&pinstance->outstanding_cmds, 0);
atomic_set(&pinstance->last_message_id, 0);
atomic_set(&pinstance->expose_resources, 0);
INIT_LIST_HEAD(&pinstance->free_res_q);
INIT_LIST_HEAD(&pinstance->used_res_q);
INIT_LIST_HEAD(&pinstance->free_cmd_pool);
INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
spin_lock_init(&pinstance->free_pool_lock);
spin_lock_init(&pinstance->pending_pool_lock);
spin_lock_init(&pinstance->resource_lock);
mutex_init(&pinstance->aen_queue_lock);
/* Work-queue (Shared) for deferred processing error handling */
INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
/* Initialize the default log_level */
pinstance->current_log_level = pmcraid_log_level;
/* Setup variables required for reset engine */
pinstance->ioa_state = IOA_STATE_UNKNOWN;
pinstance->reset_cmd = NULL;
return 0;
}
/**
* pmcraid_shutdown - shutdown adapter controller.
* @pdev: pci device struct
*
* Issues an adapter shutdown to the card waits for its completion
*
* Return value
* none
*/
static void pmcraid_shutdown(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
pmcraid_reset_bringdown(pinstance);
}
/**
* pmcraid_get_minor - returns unused minor number from minor number bitmap
*/
static unsigned short pmcraid_get_minor(void)
{
int minor;
minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
__set_bit(minor, pmcraid_minor);
return minor;
}
/**
* pmcraid_release_minor - releases given minor back to minor number bitmap
*/
static void pmcraid_release_minor(unsigned short minor)
{
__clear_bit(minor, pmcraid_minor);
}
/**
* pmcraid_setup_chrdev - allocates a minor number and registers a char device
*
* @pinstance: pointer to adapter instance for which to register device
*
* Return value
* 0 in case of success, otherwise non-zero
*/
static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
{
int minor;
int error;
minor = pmcraid_get_minor();
cdev_init(&pinstance->cdev, &pmcraid_fops);
pinstance->cdev.owner = THIS_MODULE;
error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
if (error)
pmcraid_release_minor(minor);
else
device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
NULL, "%s%u", PMCRAID_DEVFILE, minor);
return error;
}
/**
* pmcraid_release_chrdev - unregisters per-adapter management interface
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* none
*/
static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
{
pmcraid_release_minor(MINOR(pinstance->cdev.dev));
device_destroy(pmcraid_class,
MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
cdev_del(&pinstance->cdev);
}
/**
* pmcraid_remove - IOA hot plug remove entry point
* @pdev: pci device struct
*
* Return value
* none
*/
static void __devexit pmcraid_remove(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
/* remove the management interface (/dev file) for this device */
pmcraid_release_chrdev(pinstance);
/* remove host template from scsi midlayer */
scsi_remove_host(pinstance->host);
/* block requests from mid-layer */
scsi_block_requests(pinstance->host);
/* initiate shutdown adapter */
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
flush_work_sync(&pinstance->worker_q);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
pmcraid_release_buffers(pinstance);
iounmap(pinstance->mapped_dma_addr);
pci_release_regions(pdev);
scsi_host_put(pinstance->host);
pci_disable_device(pdev);
return;
}
#ifdef CONFIG_PM
/**
* pmcraid_suspend - driver suspend entry point for power management
* @pdev: PCI device structure
* @state: PCI power state to suspend routine
*
* Return Value - 0 always
*/
static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
pmcraid_kill_tasklets(pinstance);
pci_set_drvdata(pinstance->pdev, pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
/**
* pmcraid_resume - driver resume entry point PCI power management
* @pdev: PCI device structure
*
* Return Value - 0 in case of success. Error code in case of any failure
*/
static int pmcraid_resume(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
struct Scsi_Host *host = pinstance->host;
int rc;
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
rc = pci_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev, "resume: Enable device failed\n");
return rc;
}
pci_set_master(pdev);
if ((sizeof(dma_addr_t) == 4) ||
pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc == 0)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
goto disable_device;
}
pmcraid_disable_interrupts(pinstance, ~0);
atomic_set(&pinstance->outstanding_cmds, 0);
rc = pmcraid_register_interrupt_handler(pinstance);
if (rc) {
dev_err(&pdev->dev,
"resume: couldn't register interrupt handlers\n");
rc = -ENODEV;
goto release_host;
}
pmcraid_init_tasklets(pinstance);
pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
/* Start with hard reset sequence which brings up IOA to operational
* state as well as completes the reset sequence.
*/
pinstance->ioa_hard_reset = 1;
/* Start IOA firmware initialization and bring card to Operational
* state.
*/
if (pmcraid_reset_bringup(pinstance)) {
dev_err(&pdev->dev, "couldn't initialize IOA\n");
rc = -ENODEV;
goto release_tasklets;
}
return 0;
release_tasklets:
pmcraid_disable_interrupts(pinstance, ~0);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
release_host:
scsi_host_put(host);
disable_device:
pci_disable_device(pdev);
return rc;
}
#else
#define pmcraid_suspend NULL
#define pmcraid_resume NULL
#endif /* CONFIG_PM */
/**
* pmcraid_complete_ioa_reset - Called by either timer or tasklet during
* completion of the ioa reset
* @cmd: pointer to reset command block
*/
static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long flags;
spin_lock_irqsave(pinstance->host->host_lock, flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, flags);
scsi_unblock_requests(pinstance->host);
schedule_work(&pinstance->worker_q);
}
/**
* pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
*
* @cmd: pointer to pmcraid_cmd structure
*
* Return Value
* 0 for success or non-zero for failure cases
*/
static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
pmcraid_reinit_cmdblk(cmd);
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
/* If this was called as part of resource table reinitialization due to
* lost CCN, it is enough to return the command block back to free pool
* as part of set_supported_devs completion function.
*/
if (cmd->drv_inst->reinit_cfg_table) {
cmd->drv_inst->reinit_cfg_table = 0;
cmd->release = 1;
cmd_done = pmcraid_reinit_cfgtable_done;
}
/* we will be done with the reset sequence after set supported devices,
* setup the done function to return the command block back to free
* pool
*/
pmcraid_send_cmd(cmd,
cmd_done,
PMCRAID_SET_SUP_DEV_TIMEOUT,
pmcraid_timeout_handler);
return;
}
/**
* pmcraid_set_timestamp - set the timestamp to IOAFP
*
* @cmd: pointer to pmcraid_cmd structure
*
* Return Value
* 0 for success or non-zero for failure cases
*/
static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
__be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct timeval tv;
__le64 timestamp;
do_gettimeofday(&tv);
timestamp = tv.tv_sec * 1000;
pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
pmcraid_reinit_cmdblk(cmd);
ioarcb->request_type = REQ_TYPE_SCSI;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
ioarcb->data_transfer_length =
cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
ioadl = &(ioarcb->add_data.u.ioadl[0]);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
if (!pinstance->timestamp_error) {
pinstance->timestamp_error = 0;
pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
} else {
pmcraid_send_cmd(cmd, pmcraid_return_cmd,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
return;
}
}
/**
* pmcraid_init_res_table - Initialize the resource table
* @cmd: pointer to pmcraid command struct
*
* This function looks through the existing resource table, comparing
* it with the config table. This function will take care of old/new
* devices and schedule adding/removing them from the mid-layer
* as appropriate.
*
* Return value
* None
*/
static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_resource_entry *res, *temp;
struct pmcraid_config_table_entry *cfgte;
unsigned long lock_flags;
int found, rc, i;
u16 fw_version;
LIST_HEAD(old_res);
if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
pmcraid_err("IOA requires microcode download\n");
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
/* resource list is protected by pinstance->resource_lock.
* init_res_table can be called from probe (user-thread) or runtime
* reset (timer/tasklet)
*/
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
list_move_tail(&res->queue, &old_res);
for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
if (be16_to_cpu(pinstance->inq_data->fw_version) <=
PMCRAID_FW_VERSION_1)
cfgte = &pinstance->cfg_table->entries[i];
else
cfgte = (struct pmcraid_config_table_entry *)
&pinstance->cfg_table->entries_ext[i];
if (!pmcraid_expose_resource(fw_version, cfgte))
continue;
found = 0;
/* If this entry was already detected and initialized */
list_for_each_entry_safe(res, temp, &old_res, queue) {
rc = memcmp(&res->cfg_entry.resource_address,
&cfgte->resource_address,
sizeof(cfgte->resource_address));
if (!rc) {
list_move_tail(&res->queue,
&pinstance->used_res_q);
found = 1;
break;
}
}
/* If this is new entry, initialize it and add it the queue */
if (!found) {
if (list_empty(&pinstance->free_res_q)) {
pmcraid_err("Too many devices attached\n");
break;
}
found = 1;
res = list_entry(pinstance->free_res_q.next,
struct pmcraid_resource_entry, queue);
res->scsi_dev = NULL;
res->change_detected = RES_CHANGE_ADD;
res->reset_progress = 0;
list_move_tail(&res->queue, &pinstance->used_res_q);
}
/* copy new configuration table entry details into driver
* maintained resource entry
*/
if (found) {
memcpy(&res->cfg_entry, cfgte,
pinstance->config_table_entry_size);
pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
res->cfg_entry.resource_type,
(fw_version <= PMCRAID_FW_VERSION_1 ?
res->cfg_entry.unique_flags1 :
res->cfg_entry.array_id & 0xFF),
le32_to_cpu(res->cfg_entry.resource_address));
}
}
/* Detect any deleted entries, mark them for deletion from mid-layer */
list_for_each_entry_safe(res, temp, &old_res, queue) {
if (res->scsi_dev) {
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
list_move_tail(&res->queue, &pinstance->used_res_q);
} else {
list_move_tail(&res->queue, &pinstance->free_res_q);
}
}
/* release the resource list lock */
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
pmcraid_set_timestamp(cmd);
}
/**
* pmcraid_querycfg - Send a Query IOA Config to the adapter.
* @cmd: pointer pmcraid_cmd struct
*
* This function sends a Query IOA Configuration command to the adapter to
* retrieve the IOA configuration table.
*
* Return value:
* none
*/
static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct pmcraid_instance *pinstance = cmd->drv_inst;
int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
if (be16_to_cpu(pinstance->inq_data->fw_version) <=
PMCRAID_FW_VERSION_1)
pinstance->config_table_entry_size =
sizeof(struct pmcraid_config_table_entry);
else
pinstance->config_table_entry_size =
sizeof(struct pmcraid_config_table_entry_ext);
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
/* firmware requires 4-byte length field, specified in B.E format */
memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
/* Since entire config table can be described by single IOADL, it can
* be part of IOARCB itself
*/
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length =
cpu_to_le32(sizeof(struct pmcraid_config_table));
ioadl = &(ioarcb->add_data.u.ioadl[0]);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
pmcraid_send_cmd(cmd, pmcraid_init_res_table,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
}
/**
* pmcraid_probe - PCI probe entry pointer for PMC MaxRAID controller driver
* @pdev: pointer to pci device structure
* @dev_id: pointer to device ids structure
*
* Return Value
* returns 0 if the device is claimed and successfully configured.
* returns non-zero error code in case of any failure
*/
static int __devinit pmcraid_probe(
struct pci_dev *pdev,
const struct pci_device_id *dev_id
)
{
struct pmcraid_instance *pinstance;
struct Scsi_Host *host;
void __iomem *mapped_pci_addr;
int rc = PCIBIOS_SUCCESSFUL;
if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
pmcraid_err
("maximum number(%d) of supported adapters reached\n",
atomic_read(&pmcraid_adapter_count));
return -ENOMEM;
}
atomic_inc(&pmcraid_adapter_count);
rc = pci_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev, "Cannot enable adapter\n");
atomic_dec(&pmcraid_adapter_count);
return rc;
}
dev_info(&pdev->dev,
"Found new IOA(%x:%x), Total IOA count: %d\n",
pdev->vendor, pdev->device,
atomic_read(&pmcraid_adapter_count));
rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
if (rc < 0) {
dev_err(&pdev->dev,
"Couldn't register memory range of registers\n");
goto out_disable_device;
}
mapped_pci_addr = pci_iomap(pdev, 0, 0);
if (!mapped_pci_addr) {
dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
rc = -ENOMEM;
goto out_release_regions;
}
pci_set_master(pdev);
/* Firmware requires the system bus address of IOARCB to be within
* 32-bit addressable range though it has 64-bit IOARRIN register.
* However, firmware supports 64-bit streaming DMA buffers, whereas
* coherent buffers are to be 32-bit. Since pci_alloc_consistent always
* returns memory within 4GB (if not, change this logic), coherent
* buffers are within firmware acceptable address ranges.
*/
if ((sizeof(dma_addr_t) == 4) ||
pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
/* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
* bit mask for pci_alloc_consistent to return addresses within 4GB
*/
if (rc == 0)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
goto cleanup_nomem;
}
host = scsi_host_alloc(&pmcraid_host_template,
sizeof(struct pmcraid_instance));
if (!host) {
dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
rc = -ENOMEM;
goto cleanup_nomem;
}
host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
host->unique_id = host->host_no;
host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
/* zero out entire instance structure */
pinstance = (struct pmcraid_instance *)host->hostdata;
memset(pinstance, 0, sizeof(*pinstance));
pinstance->chip_cfg =
(struct pmcraid_chip_details *)(dev_id->driver_data);
rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
if (rc < 0) {
dev_err(&pdev->dev, "failed to initialize adapter instance\n");
goto out_scsi_host_put;
}
pci_set_drvdata(pdev, pinstance);
/* Save PCI config-space for use following the reset */
rc = pci_save_state(pinstance->pdev);
if (rc != 0) {
dev_err(&pdev->dev, "Failed to save PCI config space\n");
goto out_scsi_host_put;
}
pmcraid_disable_interrupts(pinstance, ~0);
rc = pmcraid_register_interrupt_handler(pinstance);
if (rc) {
dev_err(&pdev->dev, "couldn't register interrupt handler\n");
goto out_scsi_host_put;
}
pmcraid_init_tasklets(pinstance);
/* allocate verious buffers used by LLD.*/
rc = pmcraid_init_buffers(pinstance);
if (rc) {
pmcraid_err("couldn't allocate memory blocks\n");
goto out_unregister_isr;
}
/* check the reset type required */
pmcraid_reset_type(pinstance);
pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
/* Start IOA firmware initialization and bring card to Operational
* state.
*/
pmcraid_info("starting IOA initialization sequence\n");
if (pmcraid_reset_bringup(pinstance)) {
dev_err(&pdev->dev, "couldn't initialize IOA\n");
rc = 1;
goto out_release_bufs;
}
/* Add adapter instance into mid-layer list */
rc = scsi_add_host(pinstance->host, &pdev->dev);
if (rc != 0) {
pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
goto out_release_bufs;
}
scsi_scan_host(pinstance->host);
rc = pmcraid_setup_chrdev(pinstance);
if (rc != 0) {
pmcraid_err("couldn't create mgmt interface, error: %x\n",
rc);
goto out_remove_host;
}
/* Schedule worker thread to handle CCN and take care of adding and
* removing devices to OS
*/
atomic_set(&pinstance->expose_resources, 1);
schedule_work(&pinstance->worker_q);
return rc;
out_remove_host:
scsi_remove_host(host);
out_release_bufs:
pmcraid_release_buffers(pinstance);
out_unregister_isr:
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
out_scsi_host_put:
scsi_host_put(host);
cleanup_nomem:
iounmap(mapped_pci_addr);
out_release_regions:
pci_release_regions(pdev);
out_disable_device:
atomic_dec(&pmcraid_adapter_count);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return -ENODEV;
}
/*
* PCI driver structure of pcmraid driver
*/
static struct pci_driver pmcraid_driver = {
.name = PMCRAID_DRIVER_NAME,
.id_table = pmcraid_pci_table,
.probe = pmcraid_probe,
.remove = pmcraid_remove,
.suspend = pmcraid_suspend,
.resume = pmcraid_resume,
.shutdown = pmcraid_shutdown
};
/**
* pmcraid_init - module load entry point
*/
static int __init pmcraid_init(void)
{
dev_t dev;
int error;
pmcraid_info("%s Device Driver version: %s\n",
PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
error = alloc_chrdev_region(&dev, 0,
PMCRAID_MAX_ADAPTERS,
PMCRAID_DEVFILE);
if (error) {
pmcraid_err("failed to get a major number for adapters\n");
goto out_init;
}
pmcraid_major = MAJOR(dev);
pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
if (IS_ERR(pmcraid_class)) {
error = PTR_ERR(pmcraid_class);
pmcraid_err("failed to register with with sysfs, error = %x\n",
error);
goto out_unreg_chrdev;
}
error = pmcraid_netlink_init();
if (error)
goto out_unreg_chrdev;
error = pci_register_driver(&pmcraid_driver);
if (error == 0)
goto out_init;
pmcraid_err("failed to register pmcraid driver, error = %x\n",
error);
class_destroy(pmcraid_class);
pmcraid_netlink_release();
out_unreg_chrdev:
unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
out_init:
return error;
}
/**
* pmcraid_exit - module unload entry point
*/
static void __exit pmcraid_exit(void)
{
pmcraid_netlink_release();
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
PMCRAID_MAX_ADAPTERS);
pci_unregister_driver(&pmcraid_driver);
class_destroy(pmcraid_class);
}
module_init(pmcraid_init);
module_exit(pmcraid_exit);
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3484_0 |
crossvul-cpp_data_bad_5669_0 | /*
* linux/drivers/video/fbmem.c
*
* Copyright (C) 1994 Martin Schaller
*
* 2001 - Documented with DocBook
* - Brad Douglas <brad@neruo.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
#include <linux/init.h>
#include <linux/linux_logo.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/fb.h>
#include <asm/fb.h>
/*
* Frame buffer device initialization and setup routines
*/
#define FBPIXMAPSIZE (1024 * 8)
static DEFINE_MUTEX(registration_lock);
struct fb_info *registered_fb[FB_MAX] __read_mostly;
int num_registered_fb __read_mostly;
static struct fb_info *get_fb_info(unsigned int idx)
{
struct fb_info *fb_info;
if (idx >= FB_MAX)
return ERR_PTR(-ENODEV);
mutex_lock(®istration_lock);
fb_info = registered_fb[idx];
if (fb_info)
atomic_inc(&fb_info->count);
mutex_unlock(®istration_lock);
return fb_info;
}
static void put_fb_info(struct fb_info *fb_info)
{
if (!atomic_dec_and_test(&fb_info->count))
return;
if (fb_info->fbops->fb_destroy)
fb_info->fbops->fb_destroy(fb_info);
}
int lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
if (!info->fbops) {
mutex_unlock(&info->lock);
return 0;
}
return 1;
}
EXPORT_SYMBOL(lock_fb_info);
/*
* Helpers
*/
int fb_get_color_depth(struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix)
{
int depth = 0;
if (fix->visual == FB_VISUAL_MONO01 ||
fix->visual == FB_VISUAL_MONO10)
depth = 1;
else {
if (var->green.length == var->blue.length &&
var->green.length == var->red.length &&
var->green.offset == var->blue.offset &&
var->green.offset == var->red.offset)
depth = var->green.length;
else
depth = var->green.length + var->red.length +
var->blue.length;
}
return depth;
}
EXPORT_SYMBOL(fb_get_color_depth);
/*
* Data padding functions.
*/
void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height)
{
__fb_pad_aligned_buffer(dst, d_pitch, src, s_pitch, height);
}
EXPORT_SYMBOL(fb_pad_aligned_buffer);
void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, u32 height,
u32 shift_high, u32 shift_low, u32 mod)
{
u8 mask = (u8) (0xfff << shift_high), tmp;
int i, j;
for (i = height; i--; ) {
for (j = 0; j < idx; j++) {
tmp = dst[j];
tmp &= mask;
tmp |= *src >> shift_low;
dst[j] = tmp;
tmp = *src << shift_high;
dst[j+1] = tmp;
src++;
}
tmp = dst[idx];
tmp &= mask;
tmp |= *src >> shift_low;
dst[idx] = tmp;
if (shift_high < mod) {
tmp = *src << shift_high;
dst[idx+1] = tmp;
}
src++;
dst += d_pitch;
}
}
EXPORT_SYMBOL(fb_pad_unaligned_buffer);
/*
* we need to lock this section since fb_cursor
* may use fb_imageblit()
*/
char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size)
{
u32 align = buf->buf_align - 1, offset;
char *addr = buf->addr;
/* If IO mapped, we need to sync before access, no sharing of
* the pixmap is done
*/
if (buf->flags & FB_PIXMAP_IO) {
if (info->fbops->fb_sync && (buf->flags & FB_PIXMAP_SYNC))
info->fbops->fb_sync(info);
return addr;
}
/* See if we fit in the remaining pixmap space */
offset = buf->offset + align;
offset &= ~align;
if (offset + size > buf->size) {
/* We do not fit. In order to be able to re-use the buffer,
* we must ensure no asynchronous DMA'ing or whatever operation
* is in progress, we sync for that.
*/
if (info->fbops->fb_sync && (buf->flags & FB_PIXMAP_SYNC))
info->fbops->fb_sync(info);
offset = 0;
}
buf->offset = offset + size;
addr += offset;
return addr;
}
#ifdef CONFIG_LOGO
static inline unsigned safe_shift(unsigned d, int n)
{
return n < 0 ? d >> -n : d << n;
}
static void fb_set_logocmap(struct fb_info *info,
const struct linux_logo *logo)
{
struct fb_cmap palette_cmap;
u16 palette_green[16];
u16 palette_blue[16];
u16 palette_red[16];
int i, j, n;
const unsigned char *clut = logo->clut;
palette_cmap.start = 0;
palette_cmap.len = 16;
palette_cmap.red = palette_red;
palette_cmap.green = palette_green;
palette_cmap.blue = palette_blue;
palette_cmap.transp = NULL;
for (i = 0; i < logo->clutsize; i += n) {
n = logo->clutsize - i;
/* palette_cmap provides space for only 16 colors at once */
if (n > 16)
n = 16;
palette_cmap.start = 32 + i;
palette_cmap.len = n;
for (j = 0; j < n; ++j) {
palette_cmap.red[j] = clut[0] << 8 | clut[0];
palette_cmap.green[j] = clut[1] << 8 | clut[1];
palette_cmap.blue[j] = clut[2] << 8 | clut[2];
clut += 3;
}
fb_set_cmap(&palette_cmap, info);
}
}
static void fb_set_logo_truepalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
static const unsigned char mask[] = { 0,0x80,0xc0,0xe0,0xf0,0xf8,0xfc,0xfe,0xff };
unsigned char redmask, greenmask, bluemask;
int redshift, greenshift, blueshift;
int i;
const unsigned char *clut = logo->clut;
/*
* We have to create a temporary palette since console palette is only
* 16 colors long.
*/
/* Bug: Doesn't obey msb_right ... (who needs that?) */
redmask = mask[info->var.red.length < 8 ? info->var.red.length : 8];
greenmask = mask[info->var.green.length < 8 ? info->var.green.length : 8];
bluemask = mask[info->var.blue.length < 8 ? info->var.blue.length : 8];
redshift = info->var.red.offset - (8 - info->var.red.length);
greenshift = info->var.green.offset - (8 - info->var.green.length);
blueshift = info->var.blue.offset - (8 - info->var.blue.length);
for ( i = 0; i < logo->clutsize; i++) {
palette[i+32] = (safe_shift((clut[0] & redmask), redshift) |
safe_shift((clut[1] & greenmask), greenshift) |
safe_shift((clut[2] & bluemask), blueshift));
clut += 3;
}
}
static void fb_set_logo_directpalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
int redshift, greenshift, blueshift;
int i;
redshift = info->var.red.offset;
greenshift = info->var.green.offset;
blueshift = info->var.blue.offset;
for (i = 32; i < 32 + logo->clutsize; i++)
palette[i] = i << redshift | i << greenshift | i << blueshift;
}
static void fb_set_logo(struct fb_info *info,
const struct linux_logo *logo, u8 *dst,
int depth)
{
int i, j, k;
const u8 *src = logo->data;
u8 xor = (info->fix.visual == FB_VISUAL_MONO01) ? 0xff : 0;
u8 fg = 1, d;
switch (fb_get_color_depth(&info->var, &info->fix)) {
case 1:
fg = 1;
break;
case 2:
fg = 3;
break;
default:
fg = 7;
break;
}
if (info->fix.visual == FB_VISUAL_MONO01 ||
info->fix.visual == FB_VISUAL_MONO10)
fg = ~((u8) (0xfff << info->var.green.length));
switch (depth) {
case 4:
for (i = 0; i < logo->height; i++)
for (j = 0; j < logo->width; src++) {
*dst++ = *src >> 4;
j++;
if (j < logo->width) {
*dst++ = *src & 0x0f;
j++;
}
}
break;
case 1:
for (i = 0; i < logo->height; i++) {
for (j = 0; j < logo->width; src++) {
d = *src ^ xor;
for (k = 7; k >= 0; k--) {
*dst++ = ((d >> k) & 1) ? fg : 0;
j++;
}
}
}
break;
}
}
/*
* Three (3) kinds of logo maps exist. linux_logo_clut224 (>16 colors),
* linux_logo_vga16 (16 colors) and linux_logo_mono (2 colors). Depending on
* the visual format and color depth of the framebuffer, the DAC, the
* pseudo_palette, and the logo data will be adjusted accordingly.
*
* Case 1 - linux_logo_clut224:
* Color exceeds the number of console colors (16), thus we set the hardware DAC
* using fb_set_cmap() appropriately. The "needs_cmapreset" flag will be set.
*
* For visuals that require color info from the pseudo_palette, we also construct
* one for temporary use. The "needs_directpalette" or "needs_truepalette" flags
* will be set.
*
* Case 2 - linux_logo_vga16:
* The number of colors just matches the console colors, thus there is no need
* to set the DAC or the pseudo_palette. However, the bitmap is packed, ie,
* each byte contains color information for two pixels (upper and lower nibble).
* To be consistent with fb_imageblit() usage, we therefore separate the two
* nibbles into separate bytes. The "depth" flag will be set to 4.
*
* Case 3 - linux_logo_mono:
* This is similar with Case 2. Each byte contains information for 8 pixels.
* We isolate each bit and expand each into a byte. The "depth" flag will
* be set to 1.
*/
static struct logo_data {
int depth;
int needs_directpalette;
int needs_truepalette;
int needs_cmapreset;
const struct linux_logo *logo;
} fb_logo __read_mostly;
static void fb_rotate_logo_ud(const u8 *in, u8 *out, u32 width, u32 height)
{
u32 size = width * height, i;
out += size - 1;
for (i = size; i--; )
*out-- = *in++;
}
static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height)
{
int i, j, h = height - 1;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
out[height * j + h - i] = *in++;
}
static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height)
{
int i, j, w = width - 1;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
out[height * (w - j) + i] = *in++;
}
static void fb_rotate_logo(struct fb_info *info, u8 *dst,
struct fb_image *image, int rotate)
{
u32 tmp;
if (rotate == FB_ROTATE_UD) {
fb_rotate_logo_ud(image->data, dst, image->width,
image->height);
image->dx = info->var.xres - image->width - image->dx;
image->dy = info->var.yres - image->height - image->dy;
} else if (rotate == FB_ROTATE_CW) {
fb_rotate_logo_cw(image->data, dst, image->width,
image->height);
tmp = image->width;
image->width = image->height;
image->height = tmp;
tmp = image->dy;
image->dy = image->dx;
image->dx = info->var.xres - image->width - tmp;
} else if (rotate == FB_ROTATE_CCW) {
fb_rotate_logo_ccw(image->data, dst, image->width,
image->height);
tmp = image->width;
image->width = image->height;
image->height = tmp;
tmp = image->dx;
image->dx = image->dy;
image->dy = info->var.yres - image->height - tmp;
}
image->data = dst;
}
static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
int rotate, unsigned int num)
{
unsigned int x;
if (rotate == FB_ROTATE_UR) {
for (x = 0;
x < num && image->dx + image->width <= info->var.xres;
x++) {
info->fbops->fb_imageblit(info, image);
image->dx += image->width + 8;
}
} else if (rotate == FB_ROTATE_UD) {
for (x = 0; x < num && image->dx >= 0; x++) {
info->fbops->fb_imageblit(info, image);
image->dx -= image->width + 8;
}
} else if (rotate == FB_ROTATE_CW) {
for (x = 0;
x < num && image->dy + image->height <= info->var.yres;
x++) {
info->fbops->fb_imageblit(info, image);
image->dy += image->height + 8;
}
} else if (rotate == FB_ROTATE_CCW) {
for (x = 0; x < num && image->dy >= 0; x++) {
info->fbops->fb_imageblit(info, image);
image->dy -= image->height + 8;
}
}
}
static int fb_show_logo_line(struct fb_info *info, int rotate,
const struct linux_logo *logo, int y,
unsigned int n)
{
u32 *palette = NULL, *saved_pseudo_palette = NULL;
unsigned char *logo_new = NULL, *logo_rotate = NULL;
struct fb_image image;
/* Return if the frame buffer is not mapped or suspended */
if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_MODULE)
return 0;
image.depth = 8;
image.data = logo->data;
if (fb_logo.needs_cmapreset)
fb_set_logocmap(info, logo);
if (fb_logo.needs_truepalette ||
fb_logo.needs_directpalette) {
palette = kmalloc(256 * 4, GFP_KERNEL);
if (palette == NULL)
return 0;
if (fb_logo.needs_truepalette)
fb_set_logo_truepalette(info, logo, palette);
else
fb_set_logo_directpalette(info, logo, palette);
saved_pseudo_palette = info->pseudo_palette;
info->pseudo_palette = palette;
}
if (fb_logo.depth <= 4) {
logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
if (logo_new == NULL) {
kfree(palette);
if (saved_pseudo_palette)
info->pseudo_palette = saved_pseudo_palette;
return 0;
}
image.data = logo_new;
fb_set_logo(info, logo, logo_new, fb_logo.depth);
}
image.dx = 0;
image.dy = y;
image.width = logo->width;
image.height = logo->height;
if (rotate) {
logo_rotate = kmalloc(logo->width *
logo->height, GFP_KERNEL);
if (logo_rotate)
fb_rotate_logo(info, logo_rotate, &image, rotate);
}
fb_do_show_logo(info, &image, rotate, n);
kfree(palette);
if (saved_pseudo_palette != NULL)
info->pseudo_palette = saved_pseudo_palette;
kfree(logo_new);
kfree(logo_rotate);
return logo->height;
}
#ifdef CONFIG_FB_LOGO_EXTRA
#define FB_LOGO_EX_NUM_MAX 10
static struct logo_data_extra {
const struct linux_logo *logo;
unsigned int n;
} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
static unsigned int fb_logo_ex_num;
void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
{
if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
return;
fb_logo_ex[fb_logo_ex_num].logo = logo;
fb_logo_ex[fb_logo_ex_num].n = n;
fb_logo_ex_num++;
}
static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
unsigned int yres)
{
unsigned int i;
/* FIXME: logo_ex supports only truecolor fb. */
if (info->fix.visual != FB_VISUAL_TRUECOLOR)
fb_logo_ex_num = 0;
for (i = 0; i < fb_logo_ex_num; i++) {
if (fb_logo_ex[i].logo->type != fb_logo.logo->type) {
fb_logo_ex[i].logo = NULL;
continue;
}
height += fb_logo_ex[i].logo->height;
if (height > yres) {
height -= fb_logo_ex[i].logo->height;
fb_logo_ex_num = i;
break;
}
}
return height;
}
static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
{
unsigned int i;
for (i = 0; i < fb_logo_ex_num; i++)
y += fb_show_logo_line(info, rotate,
fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
return y;
}
#else /* !CONFIG_FB_LOGO_EXTRA */
static inline int fb_prepare_extra_logos(struct fb_info *info,
unsigned int height,
unsigned int yres)
{
return height;
}
static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
{
return y;
}
#endif /* CONFIG_FB_LOGO_EXTRA */
int fb_prepare_logo(struct fb_info *info, int rotate)
{
int depth = fb_get_color_depth(&info->var, &info->fix);
unsigned int yres;
memset(&fb_logo, 0, sizeof(struct logo_data));
if (info->flags & FBINFO_MISC_TILEBLITTING ||
info->flags & FBINFO_MODULE)
return 0;
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
depth = info->var.blue.length;
if (info->var.red.length < depth)
depth = info->var.red.length;
if (info->var.green.length < depth)
depth = info->var.green.length;
}
if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR && depth > 4) {
/* assume console colormap */
depth = 4;
}
/* Return if no suitable logo was found */
fb_logo.logo = fb_find_logo(depth);
if (!fb_logo.logo) {
return 0;
}
if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
yres = info->var.yres;
else
yres = info->var.xres;
if (fb_logo.logo->height > yres) {
fb_logo.logo = NULL;
return 0;
}
/* What depth we asked for might be different from what we get */
if (fb_logo.logo->type == LINUX_LOGO_CLUT224)
fb_logo.depth = 8;
else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
fb_logo.depth = 4;
else
fb_logo.depth = 1;
if (fb_logo.depth > 4 && depth > 4) {
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
fb_logo.needs_truepalette = 1;
break;
case FB_VISUAL_DIRECTCOLOR:
fb_logo.needs_directpalette = 1;
fb_logo.needs_cmapreset = 1;
break;
case FB_VISUAL_PSEUDOCOLOR:
fb_logo.needs_cmapreset = 1;
break;
}
}
return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
}
int fb_show_logo(struct fb_info *info, int rotate)
{
int y;
y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
num_online_cpus());
y = fb_show_extra_logos(info, y, rotate);
return y;
}
#else
int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
#endif /* CONFIG_LOGO */
static void *fb_seq_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(®istration_lock);
return (*pos < FB_MAX) ? pos : NULL;
}
static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (*pos < FB_MAX) ? pos : NULL;
}
static void fb_seq_stop(struct seq_file *m, void *v)
{
mutex_unlock(®istration_lock);
}
static int fb_seq_show(struct seq_file *m, void *v)
{
int i = *(loff_t *)v;
struct fb_info *fi = registered_fb[i];
if (fi)
seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
return 0;
}
static const struct seq_operations proc_fb_seq_ops = {
.start = fb_seq_start,
.next = fb_seq_next,
.stop = fb_seq_stop,
.show = fb_seq_show,
};
static int proc_fb_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_fb_seq_ops);
}
static const struct file_operations fb_proc_fops = {
.owner = THIS_MODULE,
.open = proc_fb_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* We hold a reference to the fb_info in file->private_data,
* but if the current registered fb has changed, we don't
* actually want to use it.
*
* So look up the fb_info using the inode minor number,
* and just verify it against the reference we have.
*/
static struct fb_info *file_fb_info(struct file *file)
{
struct inode *inode = file_inode(file);
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
if (info != file->private_data)
info = NULL;
return info;
}
static ssize_t
fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
struct fb_info *info = file_fb_info(file);
u8 *buffer, *dst;
u8 __iomem *src;
int c, cnt = 0, err = 0;
unsigned long total_size;
if (!info || ! info->screen_base)
return -ENODEV;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
if (info->fbops->fb_read)
return info->fbops->fb_read(info, buf, count, ppos);
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
if (p >= total_size)
return 0;
if (count >= total_size)
count = total_size;
if (count + p > total_size)
count = total_size - p;
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
src = (u8 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
dst = buffer;
fb_memcpy_fromfb(dst, src, c);
dst += c;
src += c;
if (copy_to_user(buf, buffer, c)) {
err = -EFAULT;
break;
}
*ppos += c;
buf += c;
cnt += c;
count -= c;
}
kfree(buffer);
return (err) ? err : cnt;
}
static ssize_t
fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
struct fb_info *info = file_fb_info(file);
u8 *buffer, *src;
u8 __iomem *dst;
int c, cnt = 0, err = 0;
unsigned long total_size;
if (!info || !info->screen_base)
return -ENODEV;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
if (info->fbops->fb_write)
return info->fbops->fb_write(info, buf, count, ppos);
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
if (p > total_size)
return -EFBIG;
if (count > total_size) {
err = -EFBIG;
count = total_size;
}
if (count + p > total_size) {
if (!err)
err = -ENOSPC;
count = total_size - p;
}
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
dst = (u8 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
src = buffer;
if (copy_from_user(src, buf, c)) {
err = -EFAULT;
break;
}
fb_memcpy_tofb(dst, src, c);
dst += c;
src += c;
*ppos += c;
buf += c;
cnt += c;
count -= c;
}
kfree(buffer);
return (cnt) ? cnt : err;
}
int
fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
{
struct fb_fix_screeninfo *fix = &info->fix;
unsigned int yres = info->var.yres;
int err = 0;
if (var->yoffset > 0) {
if (var->vmode & FB_VMODE_YWRAP) {
if (!fix->ywrapstep || (var->yoffset % fix->ywrapstep))
err = -EINVAL;
else
yres = 0;
} else if (!fix->ypanstep || (var->yoffset % fix->ypanstep))
err = -EINVAL;
}
if (var->xoffset > 0 && (!fix->xpanstep ||
(var->xoffset % fix->xpanstep)))
err = -EINVAL;
if (err || !info->fbops->fb_pan_display ||
var->yoffset > info->var.yres_virtual - yres ||
var->xoffset > info->var.xres_virtual - info->var.xres)
return -EINVAL;
if ((err = info->fbops->fb_pan_display(var, info)))
return err;
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
u32 activate)
{
struct fb_event event;
struct fb_blit_caps caps, fbcaps;
int err = 0;
memset(&caps, 0, sizeof(caps));
memset(&fbcaps, 0, sizeof(fbcaps));
caps.flags = (activate & FB_ACTIVATE_ALL) ? 1 : 0;
event.info = info;
event.data = ∩︀
fb_notifier_call_chain(FB_EVENT_GET_REQ, &event);
info->fbops->fb_get_caps(info, &fbcaps, var);
if (((fbcaps.x ^ caps.x) & caps.x) ||
((fbcaps.y ^ caps.y) & caps.y) ||
(fbcaps.len < caps.len))
err = -EINVAL;
return err;
}
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
int flags = info->flags;
int ret = 0;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
fb_var_to_videomode(&mode1, var);
fb_var_to_videomode(&mode2, &info->var);
/* make sure we don't delete the videomode of current var */
ret = fb_mode_is_equal(&mode1, &mode2);
if (!ret) {
struct fb_event event;
event.info = info;
event.data = &mode1;
ret = fb_notifier_call_chain(FB_EVENT_MODE_DELETE, &event);
}
if (!ret)
fb_delete_videomode(&mode1, &info->modelist);
ret = (ret) ? -EINVAL : 0;
goto done;
}
if ((var->activate & FB_ACTIVATE_FORCE) ||
memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
u32 activate = var->activate;
/* When using FOURCC mode, make sure the red, green, blue and
* transp fields are set to 0.
*/
if ((info->fix.capabilities & FB_CAP_FOURCC) &&
var->grayscale > 1) {
if (var->red.offset || var->green.offset ||
var->blue.offset || var->transp.offset ||
var->red.length || var->green.length ||
var->blue.length || var->transp.length ||
var->red.msb_right || var->green.msb_right ||
var->blue.msb_right || var->transp.msb_right)
return -EINVAL;
}
if (!info->fbops->fb_check_var) {
*var = info->var;
goto done;
}
ret = info->fbops->fb_check_var(var, info);
if (ret)
goto done;
if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
if (info->fbops->fb_get_caps) {
ret = fb_check_caps(info, var, activate);
if (ret)
goto done;
}
old_var = info->var;
info->var = *var;
if (info->fbops->fb_set_par) {
ret = info->fbops->fb_set_par(info);
if (ret) {
info->var = old_var;
printk(KERN_WARNING "detected "
"fb_set_par error, "
"error code: %d\n", ret);
goto done;
}
}
fb_pan_display(info, &info->var);
fb_set_cmap(&info->cmap, info);
fb_var_to_videomode(&mode, &info->var);
if (info->modelist.prev && info->modelist.next &&
!list_empty(&info->modelist))
ret = fb_add_videomode(&mode, &info->modelist);
if (!ret && (flags & FBINFO_MISC_USEREVENT)) {
struct fb_event event;
int evnt = (activate & FB_ACTIVATE_ALL) ?
FB_EVENT_MODE_CHANGE_ALL :
FB_EVENT_MODE_CHANGE;
info->flags &= ~FBINFO_MISC_USEREVENT;
event.info = info;
event.data = &mode;
fb_notifier_call_chain(evnt, &event);
}
}
}
done:
return ret;
}
int
fb_blank(struct fb_info *info, int blank)
{
struct fb_event event;
int ret = -EINVAL, early_ret;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
event.info = info;
event.data = ␣
early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
if (info->fbops->fb_blank)
ret = info->fbops->fb_blank(blank, info);
if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
else {
/*
* if fb_blank is failed then revert effects of
* the early blank event.
*/
if (!early_ret)
fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
}
return ret;
}
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct fb_ops *fb;
struct fb_var_screeninfo var;
struct fb_fix_screeninfo fix;
struct fb_con2fbmap con2fb;
struct fb_cmap cmap_from;
struct fb_cmap_user cmap;
struct fb_event event;
void __user *argp = (void __user *)arg;
long ret = 0;
switch (cmd) {
case FBIOGET_VSCREENINFO:
if (!lock_fb_info(info))
return -ENODEV;
var = info->var;
unlock_fb_info(info);
ret = copy_to_user(argp, &var, sizeof(var)) ? -EFAULT : 0;
break;
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
unlock_fb_info(info);
if (!ret && copy_to_user(argp, &var, sizeof(var)))
ret = -EFAULT;
break;
case FBIOGET_FSCREENINFO:
if (!lock_fb_info(info))
return -ENODEV;
fix = info->fix;
unlock_fb_info(info);
ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
break;
case FBIOPUTCMAP:
if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
ret = fb_set_user_cmap(&cmap, info);
break;
case FBIOGETCMAP:
if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
cmap_from = info->cmap;
unlock_fb_info(info);
ret = fb_cmap_to_user(&cmap_from, &cmap);
break;
case FBIOPAN_DISPLAY:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
ret = fb_pan_display(info, &var);
console_unlock();
unlock_fb_info(info);
if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
break;
case FBIO_CURSOR:
ret = -EINVAL;
break;
case FBIOGET_CON2FBMAP:
if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
con2fb.framebuffer = -1;
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
event.info = info;
fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
unlock_fb_info(info);
ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
break;
case FBIOPUT_CON2FBMAP:
if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
return -EINVAL;
if (!registered_fb[con2fb.framebuffer])
request_module("fb%d", con2fb.framebuffer);
if (!registered_fb[con2fb.framebuffer]) {
ret = -EINVAL;
break;
}
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
console_unlock();
unlock_fb_info(info);
break;
case FBIOBLANK:
if (!lock_fb_info(info))
return -ENODEV;
console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_blank(info, arg);
info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
unlock_fb_info(info);
break;
default:
if (!lock_fb_info(info))
return -ENODEV;
fb = info->fbops;
if (fb->fb_ioctl)
ret = fb->fb_ioctl(info, cmd, arg);
else
ret = -ENOTTY;
unlock_fb_info(info);
}
return ret;
}
static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct fb_info *info = file_fb_info(file);
if (!info)
return -ENODEV;
return do_fb_ioctl(info, cmd, arg);
}
#ifdef CONFIG_COMPAT
struct fb_fix_screeninfo32 {
char id[16];
compat_caddr_t smem_start;
u32 smem_len;
u32 type;
u32 type_aux;
u32 visual;
u16 xpanstep;
u16 ypanstep;
u16 ywrapstep;
u32 line_length;
compat_caddr_t mmio_start;
u32 mmio_len;
u32 accel;
u16 reserved[3];
};
struct fb_cmap32 {
u32 start;
u32 len;
compat_caddr_t red;
compat_caddr_t green;
compat_caddr_t blue;
compat_caddr_t transp;
};
static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct fb_cmap_user __user *cmap;
struct fb_cmap32 __user *cmap32;
__u32 data;
int err;
cmap = compat_alloc_user_space(sizeof(*cmap));
cmap32 = compat_ptr(arg);
if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
return -EFAULT;
if (get_user(data, &cmap32->red) ||
put_user(compat_ptr(data), &cmap->red) ||
get_user(data, &cmap32->green) ||
put_user(compat_ptr(data), &cmap->green) ||
get_user(data, &cmap32->blue) ||
put_user(compat_ptr(data), &cmap->blue) ||
get_user(data, &cmap32->transp) ||
put_user(compat_ptr(data), &cmap->transp))
return -EFAULT;
err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
if (!err) {
if (copy_in_user(&cmap32->start,
&cmap->start,
2 * sizeof(__u32)))
err = -EFAULT;
}
return err;
}
static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
struct fb_fix_screeninfo32 __user *fix32)
{
__u32 data;
int err;
err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
data = (__u32) (unsigned long) fix->smem_start;
err |= put_user(data, &fix32->smem_start);
err |= put_user(fix->smem_len, &fix32->smem_len);
err |= put_user(fix->type, &fix32->type);
err |= put_user(fix->type_aux, &fix32->type_aux);
err |= put_user(fix->visual, &fix32->visual);
err |= put_user(fix->xpanstep, &fix32->xpanstep);
err |= put_user(fix->ypanstep, &fix32->ypanstep);
err |= put_user(fix->ywrapstep, &fix32->ywrapstep);
err |= put_user(fix->line_length, &fix32->line_length);
data = (__u32) (unsigned long) fix->mmio_start;
err |= put_user(data, &fix32->mmio_start);
err |= put_user(fix->mmio_len, &fix32->mmio_len);
err |= put_user(fix->accel, &fix32->accel);
err |= copy_to_user(fix32->reserved, fix->reserved,
sizeof(fix->reserved));
return err;
}
static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
mm_segment_t old_fs;
struct fb_fix_screeninfo fix;
struct fb_fix_screeninfo32 __user *fix32;
int err;
fix32 = compat_ptr(arg);
old_fs = get_fs();
set_fs(KERNEL_DS);
err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
set_fs(old_fs);
if (!err)
err = do_fscreeninfo_to_user(&fix, fix32);
return err;
}
static long fb_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fb_info *info = file_fb_info(file);
struct fb_ops *fb;
long ret = -ENOIOCTLCMD;
if (!info)
return -ENODEV;
fb = info->fbops;
switch(cmd) {
case FBIOGET_VSCREENINFO:
case FBIOPUT_VSCREENINFO:
case FBIOPAN_DISPLAY:
case FBIOGET_CON2FBMAP:
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
case FBIOBLANK:
ret = do_fb_ioctl(info, cmd, arg);
break;
case FBIOGET_FSCREENINFO:
ret = fb_get_fscreeninfo(info, cmd, arg);
break;
case FBIOGETCMAP:
case FBIOPUTCMAP:
ret = fb_getput_cmap(info, cmd, arg);
break;
default:
if (fb->fb_compat_ioctl)
ret = fb->fb_compat_ioctl(info, cmd, arg);
break;
}
return ret;
}
#endif
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
{
struct fb_info *info = file_fb_info(file);
struct fb_ops *fb;
unsigned long off;
unsigned long start;
u32 len;
if (!info)
return -ENODEV;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
fb = info->fbops;
if (!fb)
return -ENODEV;
mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
res = fb->fb_mmap(info, vma);
mutex_unlock(&info->mm_lock);
return res;
}
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
if (off >= len) {
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
fb_pgprotect(file, vma, off);
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
static int
fb_open(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
int fbidx = iminor(inode);
struct fb_info *info;
int res = 0;
info = get_fb_info(fbidx);
if (!info) {
request_module("fb%d", fbidx);
info = get_fb_info(fbidx);
if (!info)
return -ENODEV;
}
if (IS_ERR(info))
return PTR_ERR(info);
mutex_lock(&info->lock);
if (!try_module_get(info->fbops->owner)) {
res = -ENODEV;
goto out;
}
file->private_data = info;
if (info->fbops->fb_open) {
res = info->fbops->fb_open(info,1);
if (res)
module_put(info->fbops->owner);
}
#ifdef CONFIG_FB_DEFERRED_IO
if (info->fbdefio)
fb_deferred_io_open(info, inode, file);
#endif
out:
mutex_unlock(&info->lock);
if (res)
put_fb_info(info);
return res;
}
static int
fb_release(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
struct fb_info * const info = file->private_data;
mutex_lock(&info->lock);
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);
mutex_unlock(&info->lock);
put_fb_info(info);
return 0;
}
static const struct file_operations fb_fops = {
.owner = THIS_MODULE,
.read = fb_read,
.write = fb_write,
.unlocked_ioctl = fb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = fb_compat_ioctl,
#endif
.mmap = fb_mmap,
.open = fb_open,
.release = fb_release,
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
.get_unmapped_area = get_fb_unmapped_area,
#endif
#ifdef CONFIG_FB_DEFERRED_IO
.fsync = fb_deferred_io_fsync,
#endif
.llseek = default_llseek,
};
struct class *fb_class;
EXPORT_SYMBOL(fb_class);
static int fb_check_foreignness(struct fb_info *fi)
{
const bool foreign_endian = fi->flags & FBINFO_FOREIGN_ENDIAN;
fi->flags &= ~FBINFO_FOREIGN_ENDIAN;
#ifdef __BIG_ENDIAN
fi->flags |= foreign_endian ? 0 : FBINFO_BE_MATH;
#else
fi->flags |= foreign_endian ? FBINFO_BE_MATH : 0;
#endif /* __BIG_ENDIAN */
if (fi->flags & FBINFO_BE_MATH && !fb_be_math(fi)) {
pr_err("%s: enable CONFIG_FB_BIG_ENDIAN to "
"support this framebuffer\n", fi->fix.id);
return -ENOSYS;
} else if (!(fi->flags & FBINFO_BE_MATH) && fb_be_math(fi)) {
pr_err("%s: enable CONFIG_FB_LITTLE_ENDIAN to "
"support this framebuffer\n", fi->fix.id);
return -ENOSYS;
}
return 0;
}
static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
{
/* is the generic aperture base the same as the HW one */
if (gen->base == hw->base)
return true;
/* is the generic aperture base inside the hw base->hw base+size */
if (gen->base > hw->base && gen->base < hw->base + hw->size)
return true;
return false;
}
static bool fb_do_apertures_overlap(struct apertures_struct *gena,
struct apertures_struct *hwa)
{
int i, j;
if (!hwa || !gena)
return false;
for (i = 0; i < hwa->count; ++i) {
struct aperture *h = &hwa->ranges[i];
for (j = 0; j < gena->count; ++j) {
struct aperture *g = &gena->ranges[j];
printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
(unsigned long long)g->base,
(unsigned long long)g->size,
(unsigned long long)h->base,
(unsigned long long)h->size);
if (apertures_overlap(g, h))
return true;
}
}
return false;
}
static int do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
int i;
/* check all firmware fbs and kick off if the base addr overlaps */
for (i = 0 ; i < FB_MAX; i++) {
struct apertures_struct *gen_aper;
if (!registered_fb[i])
continue;
if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
continue;
gen_aper = registered_fb[i]->apertures;
if (fb_do_apertures_overlap(gen_aper, a) ||
(primary && gen_aper && gen_aper->count &&
gen_aper->ranges[0].base == VGA_FB_PHYS)) {
printk(KERN_INFO "fb: conflicting fb hw usage "
"%s vs %s - removing generic driver\n",
name, registered_fb[i]->fix.id);
do_unregister_framebuffer(registered_fb[i]);
}
}
}
static int do_register_framebuffer(struct fb_info *fb_info)
{
int i;
struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
fb_is_primary_device(fb_info));
if (num_registered_fb == FB_MAX)
return -ENXIO;
num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
if (!registered_fb[i])
break;
fb_info->node = i;
atomic_set(&fb_info->count, 1);
mutex_init(&fb_info->lock);
mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
if (IS_ERR(fb_info->dev)) {
/* Not fatal */
printk(KERN_WARNING "Unable to create device for framebuffer %d; errno = %ld\n", i, PTR_ERR(fb_info->dev));
fb_info->dev = NULL;
} else
fb_init_device(fb_info);
if (fb_info->pixmap.addr == NULL) {
fb_info->pixmap.addr = kmalloc(FBPIXMAPSIZE, GFP_KERNEL);
if (fb_info->pixmap.addr) {
fb_info->pixmap.size = FBPIXMAPSIZE;
fb_info->pixmap.buf_align = 1;
fb_info->pixmap.scan_align = 1;
fb_info->pixmap.access_align = 32;
fb_info->pixmap.flags = FB_PIXMAP_DEFAULT;
}
}
fb_info->pixmap.offset = 0;
if (!fb_info->pixmap.blit_x)
fb_info->pixmap.blit_x = ~(u32)0;
if (!fb_info->pixmap.blit_y)
fb_info->pixmap.blit_y = ~(u32)0;
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
fb_var_to_videomode(&mode, &fb_info->var);
fb_add_videomode(&mode, &fb_info->modelist);
registered_fb[i] = fb_info;
event.info = fb_info;
if (!lock_fb_info(fb_info))
return -ENODEV;
console_lock();
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
console_unlock();
unlock_fb_info(fb_info);
return 0;
}
static int do_unregister_framebuffer(struct fb_info *fb_info)
{
struct fb_event event;
int i, ret = 0;
i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
if (!lock_fb_info(fb_info))
return -ENODEV;
console_lock();
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
console_unlock();
unlock_fb_info(fb_info);
if (ret)
return -EINVAL;
unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
fb_destroy_modelist(&fb_info->modelist);
registered_fb[i] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
console_lock();
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
return 0;
}
int unlink_framebuffer(struct fb_info *fb_info)
{
int i;
i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
if (fb_info->dev) {
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
fb_info->dev = NULL;
}
return 0;
}
EXPORT_SYMBOL(unlink_framebuffer);
void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
mutex_lock(®istration_lock);
do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(®istration_lock);
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
*
* Registers a frame buffer device @fb_info.
*
* Returns negative errno on error, or zero for success.
*
*/
int
register_framebuffer(struct fb_info *fb_info)
{
int ret;
mutex_lock(®istration_lock);
ret = do_register_framebuffer(fb_info);
mutex_unlock(®istration_lock);
return ret;
}
/**
* unregister_framebuffer - releases a frame buffer device
* @fb_info: frame buffer info structure
*
* Unregisters a frame buffer device @fb_info.
*
* Returns negative errno on error, or zero for success.
*
* This function will also notify the framebuffer console
* to release the driver.
*
* This is meant to be called within a driver's module_exit()
* function. If this is called outside module_exit(), ensure
* that the driver implements fb_open() and fb_release() to
* check that no processes are using the device.
*/
int
unregister_framebuffer(struct fb_info *fb_info)
{
int ret;
mutex_lock(®istration_lock);
ret = do_unregister_framebuffer(fb_info);
mutex_unlock(®istration_lock);
return ret;
}
/**
* fb_set_suspend - low level driver signals suspend
* @info: framebuffer affected
* @state: 0 = resuming, !=0 = suspending
*
* This is meant to be used by low level drivers to
* signal suspend/resume to the core & clients.
* It must be called with the console semaphore held
*/
void fb_set_suspend(struct fb_info *info, int state)
{
struct fb_event event;
event.info = info;
if (state) {
fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
info->state = FBINFO_STATE_SUSPENDED;
} else {
info->state = FBINFO_STATE_RUNNING;
fb_notifier_call_chain(FB_EVENT_RESUME, &event);
}
}
/**
* fbmem_init - init frame buffer subsystem
*
* Initialize the frame buffer subsystem.
*
* NOTE: This function is _only_ to be called by drivers/char/mem.c.
*
*/
static int __init
fbmem_init(void)
{
proc_create("fb", 0, NULL, &fb_proc_fops);
if (register_chrdev(FB_MAJOR,"fb",&fb_fops))
printk("unable to get major %d for fb devs\n", FB_MAJOR);
fb_class = class_create(THIS_MODULE, "graphics");
if (IS_ERR(fb_class)) {
printk(KERN_WARNING "Unable to create fb class; errno = %ld\n", PTR_ERR(fb_class));
fb_class = NULL;
}
return 0;
}
#ifdef MODULE
module_init(fbmem_init);
static void __exit
fbmem_exit(void)
{
remove_proc_entry("fb", NULL);
class_destroy(fb_class);
unregister_chrdev(FB_MAJOR, "fb");
}
module_exit(fbmem_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Framebuffer base");
#else
subsys_initcall(fbmem_init);
#endif
int fb_new_modelist(struct fb_info *info)
{
struct fb_event event;
struct fb_var_screeninfo var = info->var;
struct list_head *pos, *n;
struct fb_modelist *modelist;
struct fb_videomode *m, mode;
int err = 1;
list_for_each_safe(pos, n, &info->modelist) {
modelist = list_entry(pos, struct fb_modelist, list);
m = &modelist->mode;
fb_videomode_to_var(&var, m);
var.activate = FB_ACTIVATE_TEST;
err = fb_set_var(info, &var);
fb_var_to_videomode(&mode, &var);
if (err || !fb_mode_is_equal(m, &mode)) {
list_del(pos);
kfree(pos);
}
}
err = 1;
if (!list_empty(&info->modelist)) {
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
}
return err;
}
static char *video_options[FB_MAX] __read_mostly;
static int ofonly __read_mostly;
/**
* fb_get_options - get kernel boot parameters
* @name: framebuffer name as it would appear in
* the boot parameter line
* (video=<name>:<options>)
* @option: the option will be stored here
*
* NOTE: Needed to maintain backwards compatibility
*/
int fb_get_options(char *name, char **option)
{
char *opt, *options = NULL;
int retval = 0;
int name_len = strlen(name), i;
if (name_len && ofonly && strncmp(name, "offb", 4))
retval = 1;
if (name_len && !retval) {
for (i = 0; i < FB_MAX; i++) {
if (video_options[i] == NULL)
continue;
if (!video_options[i][0])
continue;
opt = video_options[i];
if (!strncmp(name, opt, name_len) &&
opt[name_len] == ':')
options = opt + name_len + 1;
}
}
if (options && !strncmp(options, "off", 3))
retval = 1;
if (option)
*option = options;
return retval;
}
#ifndef MODULE
/**
* video_setup - process command line options
* @options: string of options
*
* Process command line options for frame buffer subsystem.
*
* NOTE: This function is a __setup and __init function.
* It only stores the options. Drivers have to call
* fb_get_options() as necessary.
*
* Returns zero.
*
*/
static int __init video_setup(char *options)
{
int i, global = 0;
if (!options || !*options)
global = 1;
if (!global && !strncmp(options, "ofonly", 6)) {
ofonly = 1;
global = 1;
}
if (!global && !strchr(options, ':')) {
fb_mode_option = options;
global = 1;
}
if (!global) {
for (i = 0; i < FB_MAX; i++) {
if (video_options[i] == NULL) {
video_options[i] = options;
break;
}
}
}
return 1;
}
__setup("video=", video_setup);
#endif
/*
* Visible symbols for modules
*/
EXPORT_SYMBOL(register_framebuffer);
EXPORT_SYMBOL(unregister_framebuffer);
EXPORT_SYMBOL(num_registered_fb);
EXPORT_SYMBOL(registered_fb);
EXPORT_SYMBOL(fb_show_logo);
EXPORT_SYMBOL(fb_set_var);
EXPORT_SYMBOL(fb_blank);
EXPORT_SYMBOL(fb_pan_display);
EXPORT_SYMBOL(fb_get_buffer_offset);
EXPORT_SYMBOL(fb_set_suspend);
EXPORT_SYMBOL(fb_get_options);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5669_0 |
crossvul-cpp_data_bad_2020_10 | /*-------------------------------------------------------------------------
*
* tsquery_util.c
* Utilities for tsquery datatype
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* src/backend/utils/adt/tsquery_util.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "tsearch/ts_utils.h"
#include "miscadmin.h"
QTNode *
QT2QTN(QueryItem *in, char *operand)
{
QTNode *node = (QTNode *) palloc0(sizeof(QTNode));
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
node->valnode = in;
if (in->type == QI_OPR)
{
node->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
node->child[0] = QT2QTN(in + 1, operand);
node->sign = node->child[0]->sign;
if (in->qoperator.oper == OP_NOT)
node->nchild = 1;
else
{
node->nchild = 2;
node->child[1] = QT2QTN(in + in->qoperator.left, operand);
node->sign |= node->child[1]->sign;
}
}
else if (operand)
{
node->word = operand + in->qoperand.distance;
node->sign = ((uint32) 1) << (((unsigned int) in->qoperand.valcrc) % 32);
}
return node;
}
void
QTNFree(QTNode *in)
{
if (!in)
return;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type == QI_VAL && in->word && (in->flags & QTN_WORDFREE) != 0)
pfree(in->word);
if (in->child)
{
if (in->valnode)
{
if (in->valnode->type == QI_OPR && in->nchild > 0)
{
int i;
for (i = 0; i < in->nchild; i++)
QTNFree(in->child[i]);
}
if (in->flags & QTN_NEEDFREE)
pfree(in->valnode);
}
pfree(in->child);
}
pfree(in);
}
int
QTNodeCompare(QTNode *an, QTNode *bn)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (an->valnode->type != bn->valnode->type)
return (an->valnode->type > bn->valnode->type) ? -1 : 1;
if (an->valnode->type == QI_OPR)
{
QueryOperator *ao = &an->valnode->qoperator;
QueryOperator *bo = &bn->valnode->qoperator;
if (ao->oper != bo->oper)
return (ao->oper > bo->oper) ? -1 : 1;
if (an->nchild != bn->nchild)
return (an->nchild > bn->nchild) ? -1 : 1;
{
int i,
res;
for (i = 0; i < an->nchild; i++)
if ((res = QTNodeCompare(an->child[i], bn->child[i])) != 0)
return res;
}
return 0;
}
else if (an->valnode->type == QI_VAL)
{
QueryOperand *ao = &an->valnode->qoperand;
QueryOperand *bo = &bn->valnode->qoperand;
if (ao->valcrc != bo->valcrc)
{
return (ao->valcrc > bo->valcrc) ? -1 : 1;
}
return tsCompareString(an->word, ao->length, bn->word, bo->length, false);
}
else
{
elog(ERROR, "unrecognized QueryItem type: %d", an->valnode->type);
return 0; /* keep compiler quiet */
}
}
static int
cmpQTN(const void *a, const void *b)
{
return QTNodeCompare(*(QTNode *const *) a, *(QTNode *const *) b);
}
void
QTNSort(QTNode *in)
{
int i;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type != QI_OPR)
return;
for (i = 0; i < in->nchild; i++)
QTNSort(in->child[i]);
if (in->nchild > 1)
qsort((void *) in->child, in->nchild, sizeof(QTNode *), cmpQTN);
}
bool
QTNEq(QTNode *a, QTNode *b)
{
uint32 sign = a->sign & b->sign;
if (!(sign == a->sign && sign == b->sign))
return 0;
return (QTNodeCompare(a, b) == 0) ? true : false;
}
/*
* Remove unnecessary intermediate nodes. For example:
*
* OR OR
* a OR -> a b c
* b c
*/
void
QTNTernary(QTNode *in)
{
int i;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type != QI_OPR)
return;
for (i = 0; i < in->nchild; i++)
QTNTernary(in->child[i]);
for (i = 0; i < in->nchild; i++)
{
QTNode *cc = in->child[i];
if (cc->valnode->type == QI_OPR && in->valnode->qoperator.oper == cc->valnode->qoperator.oper)
{
int oldnchild = in->nchild;
in->nchild += cc->nchild - 1;
in->child = (QTNode **) repalloc(in->child, in->nchild * sizeof(QTNode *));
if (i + 1 != oldnchild)
memmove(in->child + i + cc->nchild, in->child + i + 1,
(oldnchild - i - 1) * sizeof(QTNode *));
memcpy(in->child + i, cc->child, cc->nchild * sizeof(QTNode *));
i += cc->nchild - 1;
if (cc->flags & QTN_NEEDFREE)
pfree(cc->valnode);
pfree(cc);
}
}
}
/*
* Convert a tree to binary tree by inserting intermediate nodes.
* (Opposite of QTNTernary)
*/
void
QTNBinary(QTNode *in)
{
int i;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type != QI_OPR)
return;
for (i = 0; i < in->nchild; i++)
QTNBinary(in->child[i]);
if (in->nchild <= 2)
return;
while (in->nchild > 2)
{
QTNode *nn = (QTNode *) palloc0(sizeof(QTNode));
nn->valnode = (QueryItem *) palloc0(sizeof(QueryItem));
nn->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
nn->nchild = 2;
nn->flags = QTN_NEEDFREE;
nn->child[0] = in->child[0];
nn->child[1] = in->child[1];
nn->sign = nn->child[0]->sign | nn->child[1]->sign;
nn->valnode->type = in->valnode->type;
nn->valnode->qoperator.oper = in->valnode->qoperator.oper;
in->child[0] = nn;
in->child[1] = in->child[in->nchild - 1];
in->nchild--;
}
}
/*
* Count the total length of operand string in tree, including '\0'-
* terminators.
*/
static void
cntsize(QTNode *in, int *sumlen, int *nnode)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
*nnode += 1;
if (in->valnode->type == QI_OPR)
{
int i;
for (i = 0; i < in->nchild; i++)
cntsize(in->child[i], sumlen, nnode);
}
else
{
*sumlen += in->valnode->qoperand.length + 1;
}
}
typedef struct
{
QueryItem *curitem;
char *operand;
char *curoperand;
} QTN2QTState;
static void
fillQT(QTN2QTState *state, QTNode *in)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type == QI_VAL)
{
memcpy(state->curitem, in->valnode, sizeof(QueryOperand));
memcpy(state->curoperand, in->word, in->valnode->qoperand.length);
state->curitem->qoperand.distance = state->curoperand - state->operand;
state->curoperand[in->valnode->qoperand.length] = '\0';
state->curoperand += in->valnode->qoperand.length + 1;
state->curitem++;
}
else
{
QueryItem *curitem = state->curitem;
Assert(in->valnode->type == QI_OPR);
memcpy(state->curitem, in->valnode, sizeof(QueryOperator));
Assert(in->nchild <= 2);
state->curitem++;
fillQT(state, in->child[0]);
if (in->nchild == 2)
{
curitem->qoperator.left = state->curitem - curitem;
fillQT(state, in->child[1]);
}
}
}
TSQuery
QTN2QT(QTNode *in)
{
TSQuery out;
int len;
int sumlen = 0,
nnode = 0;
QTN2QTState state;
cntsize(in, &sumlen, &nnode);
len = COMPUTESIZE(nnode, sumlen);
out = (TSQuery) palloc0(len);
SET_VARSIZE(out, len);
out->size = nnode;
state.curitem = GETQUERY(out);
state.operand = state.curoperand = GETOPERAND(out);
fillQT(&state, in);
return out;
}
QTNode *
QTNCopy(QTNode *in)
{
QTNode *out;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
out = (QTNode *) palloc(sizeof(QTNode));
*out = *in;
out->valnode = (QueryItem *) palloc(sizeof(QueryItem));
*(out->valnode) = *(in->valnode);
out->flags |= QTN_NEEDFREE;
if (in->valnode->type == QI_VAL)
{
out->word = palloc(in->valnode->qoperand.length + 1);
memcpy(out->word, in->word, in->valnode->qoperand.length);
out->word[in->valnode->qoperand.length] = '\0';
out->flags |= QTN_WORDFREE;
}
else
{
int i;
out->child = (QTNode **) palloc(sizeof(QTNode *) * in->nchild);
for (i = 0; i < in->nchild; i++)
out->child[i] = QTNCopy(in->child[i]);
}
return out;
}
void
QTNClearFlags(QTNode *in, uint32 flags)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
in->flags &= ~flags;
if (in->valnode->type != QI_VAL)
{
int i;
for (i = 0; i < in->nchild; i++)
QTNClearFlags(in->child[i], flags);
}
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_10 |
crossvul-cpp_data_good_5817_0 | /*
* Go2Webinar decoder
* Copyright (c) 2012 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Go2Webinar decoder
*/
#include <zlib.h>
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "internal.h"
#include "mjpeg.h"
enum ChunkType {
FRAME_INFO = 0xC8,
TILE_DATA,
CURSOR_POS,
CURSOR_SHAPE,
CHUNK_CC,
CHUNK_CD
};
enum Compression {
COMPR_EPIC_J_B = 2,
COMPR_KEMPF_J_B,
};
static const uint8_t luma_quant[64] = {
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
};
static const uint8_t chroma_quant[64] = {
9, 9, 12, 24, 50, 50, 50, 50,
9, 11, 13, 33, 50, 50, 50, 50,
12, 13, 28, 50, 50, 50, 50, 50,
24, 33, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50,
};
typedef struct JPGContext {
DSPContext dsp;
ScanTable scantable;
VLC dc_vlc[2], ac_vlc[2];
int prev_dc[3];
DECLARE_ALIGNED(16, int16_t, block)[6][64];
uint8_t *buf;
} JPGContext;
typedef struct G2MContext {
JPGContext jc;
int version;
int compression;
int width, height, bpp;
int tile_width, tile_height;
int tiles_x, tiles_y, tile_x, tile_y;
int got_header;
uint8_t *framebuf;
int framebuf_stride, old_width, old_height;
uint8_t *synth_tile, *jpeg_tile;
int tile_stride, old_tile_w, old_tile_h;
uint8_t *kempf_buf, *kempf_flags;
uint8_t *cursor;
int cursor_stride;
int cursor_fmt;
int cursor_w, cursor_h, cursor_x, cursor_y;
int cursor_hot_x, cursor_hot_y;
} G2MContext;
static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
const uint8_t *val_table, int nb_codes,
int is_ac)
{
uint8_t huff_size[256] = { 0 };
uint16_t huff_code[256];
uint16_t huff_sym[256];
int i;
ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
for (i = 0; i < 256; i++)
huff_sym[i] = i + 16 * is_ac;
if (is_ac)
huff_sym[0] = 16 * 256;
return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
huff_code, 2, 2, huff_sym, 2, 2, 0);
}
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
int ret;
ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
avpriv_mjpeg_val_dc, 12, 0);
if (ret)
return ret;
ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
avpriv_mjpeg_val_dc, 12, 0);
if (ret)
return ret;
ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
avpriv_mjpeg_val_ac_luminance, 251, 1);
if (ret)
return ret;
ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
avpriv_mjpeg_val_ac_chrominance, 251, 1);
if (ret)
return ret;
ff_dsputil_init(&c->dsp, avctx);
ff_init_scantable(c->dsp.idct_permutation, &c->scantable,
ff_zigzag_direct);
return 0;
}
static av_cold void jpg_free_context(JPGContext *ctx)
{
int i;
for (i = 0; i < 2; i++) {
ff_free_vlc(&ctx->dc_vlc[i]);
ff_free_vlc(&ctx->ac_vlc[i]);
}
av_freep(&ctx->buf);
}
static void jpg_unescape(const uint8_t *src, int src_size,
uint8_t *dst, int *dst_size)
{
const uint8_t *src_end = src + src_size;
uint8_t *dst_start = dst;
while (src < src_end) {
uint8_t x = *src++;
*dst++ = x;
if (x == 0xFF && !*src)
src++;
}
*dst_size = dst - dst_start;
}
static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
int plane, int16_t *block)
{
int dc, val, pos;
const int is_chroma = !!plane;
const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
c->dsp.clear_block(block);
dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
if (dc < 0)
return AVERROR_INVALIDDATA;
if (dc)
dc = get_xbits(gb, dc);
dc = dc * qmat[0] + c->prev_dc[plane];
block[0] = dc;
c->prev_dc[plane] = dc;
pos = 0;
while (pos < 63) {
val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
if (val < 0)
return AVERROR_INVALIDDATA;
pos += val >> 4;
val &= 0xF;
if (pos > 63)
return val ? AVERROR_INVALIDDATA : 0;
if (val) {
int nbits = val;
val = get_xbits(gb, nbits);
val *= qmat[ff_zigzag_direct[pos]];
block[c->scantable.permutated[pos]] = val;
}
}
return 0;
}
static inline void yuv2rgb(uint8_t *out, int Y, int U, int V)
{
out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16));
out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
}
static int jpg_decode_data(JPGContext *c, int width, int height,
const uint8_t *src, int src_size,
uint8_t *dst, int dst_stride,
const uint8_t *mask, int mask_stride, int num_mbs,
int swapuv)
{
GetBitContext gb;
uint8_t *tmp;
int mb_w, mb_h, mb_x, mb_y, i, j;
int bx, by;
int unesc_size;
int ret;
tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!tmp)
return AVERROR(ENOMEM);
c->buf = tmp;
jpg_unescape(src, src_size, c->buf, &unesc_size);
memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&gb, c->buf, unesc_size * 8);
width = FFALIGN(width, 16);
mb_w = width >> 4;
mb_h = (height + 15) >> 4;
if (!num_mbs)
num_mbs = mb_w * mb_h;
for (i = 0; i < 3; i++)
c->prev_dc[i] = 1024;
bx = by = 0;
for (mb_y = 0; mb_y < mb_h; mb_y++) {
for (mb_x = 0; mb_x < mb_w; mb_x++) {
if (mask && !mask[mb_x]) {
bx += 16;
continue;
}
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; i++) {
if ((ret = jpg_decode_block(c, &gb, 0,
c->block[i + j * 2])) != 0)
return ret;
c->dsp.idct(c->block[i + j * 2]);
}
}
for (i = 1; i < 3; i++) {
if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
return ret;
c->dsp.idct(c->block[i + 3]);
}
for (j = 0; j < 16; j++) {
uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
for (i = 0; i < 16; i++) {
int Y, U, V;
Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
yuv2rgb(out + i * 3, Y, U, V);
}
}
if (!--num_mbs)
return 0;
bx += 16;
}
bx = 0;
by += 16;
if (mask)
mask += mask_stride;
}
return 0;
}
static void kempf_restore_buf(const uint8_t *src, int len,
uint8_t *dst, int stride,
const uint8_t *jpeg_tile, int tile_stride,
int width, int height,
const uint8_t *pal, int npal, int tidx)
{
GetBitContext gb;
int i, j, nb, col;
init_get_bits(&gb, src, len * 8);
if (npal <= 2) nb = 1;
else if (npal <= 4) nb = 2;
else if (npal <= 16) nb = 4;
else nb = 8;
for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
if (get_bits(&gb, 8))
continue;
for (i = 0; i < width; i++) {
col = get_bits(&gb, nb);
if (col != tidx)
memcpy(dst + i * 3, pal + col * 3, 3);
else
memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
}
}
}
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
const uint8_t *src, int src_size)
{
int width, height;
int hdr, zsize, npal, tidx = -1, ret;
int i, j;
const uint8_t *src_end = src + src_size;
uint8_t pal[768], transp[3];
uLongf dlen = (c->tile_width + 1) * c->tile_height;
int sub_type;
int nblocks, cblocks, bstride;
int bits, bitbuf, coded;
uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
tile_y * c->tile_height * c->framebuf_stride;
if (src_size < 2)
return AVERROR_INVALIDDATA;
width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
hdr = *src++;
sub_type = hdr >> 5;
if (sub_type == 0) {
int j;
memcpy(transp, src, 3);
src += 3;
for (j = 0; j < height; j++, dst += c->framebuf_stride)
for (i = 0; i < width; i++)
memcpy(dst + i * 3, transp, 3);
return 0;
} else if (sub_type == 1) {
return jpg_decode_data(&c->jc, width, height, src, src_end - src,
dst, c->framebuf_stride, NULL, 0, 0, 0);
}
if (sub_type != 2) {
memcpy(transp, src, 3);
src += 3;
}
npal = *src++ + 1;
memcpy(pal, src, npal * 3); src += npal * 3;
if (sub_type != 2) {
for (i = 0; i < npal; i++) {
if (!memcmp(pal + i * 3, transp, 3)) {
tidx = i;
break;
}
}
}
if (src_end - src < 2)
return 0;
zsize = (src[0] << 8) | src[1]; src += 2;
if (src_end - src < zsize + (sub_type != 2))
return AVERROR_INVALIDDATA;
ret = uncompress(c->kempf_buf, &dlen, src, zsize);
if (ret)
return AVERROR_INVALIDDATA;
src += zsize;
if (sub_type == 2) {
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
NULL, 0, width, height, pal, npal, tidx);
return 0;
}
nblocks = *src++ + 1;
cblocks = 0;
bstride = FFALIGN(width, 16) >> 4;
// blocks are coded LSB and we need normal bitreader for JPEG data
bits = 0;
for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
if (!bits) {
if (src >= src_end)
return AVERROR_INVALIDDATA;
bitbuf = *src++;
bits = 8;
}
coded = bitbuf & 1;
bits--;
bitbuf >>= 1;
cblocks += coded;
if (cblocks > nblocks)
return AVERROR_INVALIDDATA;
c->kempf_flags[j + i * bstride] = coded;
}
}
memset(c->jpeg_tile, 0, c->tile_stride * height);
jpg_decode_data(&c->jc, width, height, src, src_end - src,
c->jpeg_tile, c->tile_stride,
c->kempf_flags, bstride, nblocks, 0);
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
c->jpeg_tile, c->tile_stride,
width, height, pal, npal, tidx);
return 0;
}
static int g2m_init_buffers(G2MContext *c)
{
int aligned_height;
if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
c->framebuf_stride = FFALIGN(c->width * 3, 16);
aligned_height = FFALIGN(c->height, 16);
av_free(c->framebuf);
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
if (!c->framebuf)
return AVERROR(ENOMEM);
}
if (!c->synth_tile || !c->jpeg_tile ||
c->old_tile_w < c->tile_width ||
c->old_tile_h < c->tile_height) {
c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
aligned_height = FFALIGN(c->tile_height, 16);
av_free(c->synth_tile);
av_free(c->jpeg_tile);
av_free(c->kempf_buf);
av_free(c->kempf_flags);
c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height
+ FF_INPUT_BUFFER_PADDING_SIZE);
c->kempf_flags = av_mallocz( c->tile_width * aligned_height);
if (!c->synth_tile || !c->jpeg_tile ||
!c->kempf_buf || !c->kempf_flags)
return AVERROR(ENOMEM);
}
return 0;
}
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
GetByteContext *gb)
{
int i, j, k;
uint8_t *dst;
uint32_t bits;
uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
uint32_t cursor_hot_x, cursor_hot_y;
int cursor_fmt;
uint8_t *tmp;
cur_size = bytestream2_get_be32(gb);
cursor_w = bytestream2_get_byte(gb);
cursor_h = bytestream2_get_byte(gb);
cursor_hot_x = bytestream2_get_byte(gb);
cursor_hot_y = bytestream2_get_byte(gb);
cursor_fmt = bytestream2_get_byte(gb);
cursor_stride = FFALIGN(cursor_w, 32) * 4;
if (cursor_w < 1 || cursor_w > 256 ||
cursor_h < 1 || cursor_h > 256) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %dx%d\n",
cursor_w, cursor_h);
return AVERROR_INVALIDDATA;
}
if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %d,%d\n",
cursor_hot_x, cursor_hot_y);
cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
}
if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
c->cursor_w * c->cursor_h / 4 > cur_size) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d/%d\n",
cur_size, bytestream2_get_bytes_left(gb));
return AVERROR_INVALIDDATA;
}
if (cursor_fmt != 1 && cursor_fmt != 32) {
avpriv_report_missing_feature(avctx, "Cursor format %d",
cursor_fmt);
return AVERROR_PATCHWELCOME;
}
tmp = av_realloc(c->cursor, cursor_stride * cursor_h);
if (!tmp) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
return AVERROR(ENOMEM);
}
c->cursor = tmp;
c->cursor_w = cursor_w;
c->cursor_h = cursor_h;
c->cursor_hot_x = cursor_hot_x;
c->cursor_hot_y = cursor_hot_y;
c->cursor_fmt = cursor_fmt;
c->cursor_stride = cursor_stride;
dst = c->cursor;
switch (c->cursor_fmt) {
case 1: // old monochrome
for (j = 0; j < c->cursor_h; j++) {
for (i = 0; i < c->cursor_w; i += 32) {
bits = bytestream2_get_be32(gb);
for (k = 0; k < 32; k++) {
dst[0] = !!(bits & 0x80000000);
dst += 4;
bits <<= 1;
}
}
}
dst = c->cursor;
for (j = 0; j < c->cursor_h; j++) {
for (i = 0; i < c->cursor_w; i += 32) {
bits = bytestream2_get_be32(gb);
for (k = 0; k < 32; k++) {
int mask_bit = !!(bits & 0x80000000);
switch (dst[0] * 2 + mask_bit) {
case 0:
dst[0] = 0xFF; dst[1] = 0x00;
dst[2] = 0x00; dst[3] = 0x00;
break;
case 1:
dst[0] = 0xFF; dst[1] = 0xFF;
dst[2] = 0xFF; dst[3] = 0xFF;
break;
default:
dst[0] = 0x00; dst[1] = 0x00;
dst[2] = 0x00; dst[3] = 0x00;
}
dst += 4;
bits <<= 1;
}
}
}
break;
case 32: // full colour
/* skip monochrome version of the cursor and decode RGBA instead */
bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
for (j = 0; j < c->cursor_h; j++) {
for (i = 0; i < c->cursor_w; i++) {
int val = bytestream2_get_be32(gb);
*dst++ = val >> 0;
*dst++ = val >> 8;
*dst++ = val >> 16;
*dst++ = val >> 24;
}
}
break;
default:
return AVERROR_PATCHWELCOME;
}
return 0;
}
#define APPLY_ALPHA(src, new, alpha) \
src = (src * (256 - alpha) + new * alpha) >> 8
static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
{
int i, j;
int x, y, w, h;
const uint8_t *cursor;
if (!c->cursor)
return;
x = c->cursor_x - c->cursor_hot_x;
y = c->cursor_y - c->cursor_hot_y;
cursor = c->cursor;
w = c->cursor_w;
h = c->cursor_h;
if (x + w > c->width)
w = c->width - x;
if (y + h > c->height)
h = c->height - y;
if (x < 0) {
w += x;
cursor += -x * 4;
} else {
dst += x * 3;
}
if (y < 0) {
h += y;
cursor += -y * c->cursor_stride;
} else {
dst += y * stride;
}
if (w < 0 || h < 0)
return;
for (j = 0; j < h; j++) {
for (i = 0; i < w; i++) {
uint8_t alpha = cursor[i * 4];
APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
}
dst += stride;
cursor += c->cursor_stride;
}
}
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
int *got_picture_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
G2MContext *c = avctx->priv_data;
AVFrame *pic = data;
GetByteContext bc, tbc;
int magic;
int got_header = 0;
uint32_t chunk_size;
int chunk_type;
int i;
int ret;
if (buf_size < 12) {
av_log(avctx, AV_LOG_ERROR,
"Frame should have at least 12 bytes, got %d instead\n",
buf_size);
return AVERROR_INVALIDDATA;
}
bytestream2_init(&bc, buf, buf_size);
magic = bytestream2_get_be32(&bc);
if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
(magic & 0xF) < 2 || (magic & 0xF) > 4) {
av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
return AVERROR_INVALIDDATA;
}
if ((magic & 0xF) != 4) {
av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n");
return AVERROR(ENOSYS);
}
while (bytestream2_get_bytes_left(&bc) > 5) {
chunk_size = bytestream2_get_le32(&bc) - 1;
chunk_type = bytestream2_get_byte(&bc);
if (chunk_size > bytestream2_get_bytes_left(&bc)) {
av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n",
chunk_size, chunk_type);
break;
}
switch (chunk_type) {
case FRAME_INFO:
c->got_header = 0;
if (chunk_size < 21) {
av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n",
chunk_size);
break;
}
c->width = bytestream2_get_be32(&bc);
c->height = bytestream2_get_be32(&bc);
if (c->width < 16 || c->width > avctx->width ||
c->height < 16 || c->height > avctx->height) {
av_log(avctx, AV_LOG_ERROR,
"Invalid frame dimensions %dx%d\n",
c->width, c->height);
ret = AVERROR_INVALIDDATA;
goto header_fail;
}
if (c->width != avctx->width || c->height != avctx->height)
avcodec_set_dimensions(avctx, c->width, c->height);
c->compression = bytestream2_get_be32(&bc);
if (c->compression != 2 && c->compression != 3) {
av_log(avctx, AV_LOG_ERROR,
"Unknown compression method %d\n",
c->compression);
return AVERROR_PATCHWELCOME;
}
c->tile_width = bytestream2_get_be32(&bc);
c->tile_height = bytestream2_get_be32(&bc);
if (!c->tile_width || !c->tile_height) {
av_log(avctx, AV_LOG_ERROR,
"Invalid tile dimensions %dx%d\n",
c->tile_width, c->tile_height);
ret = AVERROR_INVALIDDATA;
goto header_fail;
}
c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width;
c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
c->bpp = bytestream2_get_byte(&bc);
chunk_size -= 21;
bytestream2_skip(&bc, chunk_size);
if (g2m_init_buffers(c)) {
ret = AVERROR(ENOMEM);
goto header_fail;
}
got_header = 1;
break;
case TILE_DATA:
if (!c->tiles_x || !c->tiles_y) {
av_log(avctx, AV_LOG_WARNING,
"No frame header - skipping tile\n");
bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc));
break;
}
if (chunk_size < 2) {
av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n",
chunk_size);
break;
}
c->tile_x = bytestream2_get_byte(&bc);
c->tile_y = bytestream2_get_byte(&bc);
if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
av_log(avctx, AV_LOG_ERROR,
"Invalid tile pos %d,%d (in %dx%d grid)\n",
c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
break;
}
chunk_size -= 2;
ret = 0;
switch (c->compression) {
case COMPR_EPIC_J_B:
av_log(avctx, AV_LOG_ERROR,
"ePIC j-b compression is not implemented yet\n");
return AVERROR(ENOSYS);
case COMPR_KEMPF_J_B:
ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
buf + bytestream2_tell(&bc),
chunk_size);
break;
}
if (ret && c->framebuf)
av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
c->tile_x, c->tile_y);
bytestream2_skip(&bc, chunk_size);
break;
case CURSOR_POS:
if (chunk_size < 5) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n",
chunk_size);
break;
}
c->cursor_x = bytestream2_get_be16(&bc);
c->cursor_y = bytestream2_get_be16(&bc);
bytestream2_skip(&bc, chunk_size - 4);
break;
case CURSOR_SHAPE:
if (chunk_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n",
chunk_size);
break;
}
bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
chunk_size - 4);
g2m_load_cursor(avctx, c, &tbc);
bytestream2_skip(&bc, chunk_size);
break;
case CHUNK_CC:
case CHUNK_CD:
bytestream2_skip(&bc, chunk_size);
break;
default:
av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n",
chunk_type);
bytestream2_skip(&bc, chunk_size);
}
}
if (got_header)
c->got_header = 1;
if (c->width && c->height && c->framebuf) {
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->key_frame = got_header;
pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
for (i = 0; i < avctx->height; i++)
memcpy(pic->data[0] + i * pic->linesize[0],
c->framebuf + i * c->framebuf_stride,
c->width * 3);
g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);
*got_picture_ptr = 1;
}
return buf_size;
header_fail:
c->width = c->height = 0;
c->tiles_x = c->tiles_y = 0;
return ret;
}
static av_cold int g2m_decode_init(AVCodecContext *avctx)
{
G2MContext * const c = avctx->priv_data;
int ret;
if ((ret = jpg_init(avctx, &c->jc)) != 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
jpg_free_context(&c->jc);
return AVERROR(ENOMEM);
}
avctx->pix_fmt = AV_PIX_FMT_RGB24;
return 0;
}
static av_cold int g2m_decode_end(AVCodecContext *avctx)
{
G2MContext * const c = avctx->priv_data;
jpg_free_context(&c->jc);
av_freep(&c->kempf_buf);
av_freep(&c->kempf_flags);
av_freep(&c->synth_tile);
av_freep(&c->jpeg_tile);
av_freep(&c->cursor);
av_freep(&c->framebuf);
return 0;
}
AVCodec ff_g2m_decoder = {
.name = "g2m",
.long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_G2M,
.priv_data_size = sizeof(G2MContext),
.init = g2m_decode_init,
.close = g2m_decode_end,
.decode = g2m_decode_frame,
.capabilities = CODEC_CAP_DR1,
};
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5817_0 |
crossvul-cpp_data_bad_3689_0 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "workarounds.h"
/**************************************************************************
*
* Type name strings
*
**************************************************************************
*/
/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
[LOOPBACK_NONE] = "NONE",
[LOOPBACK_DATA] = "DATAPATH",
[LOOPBACK_GMAC] = "GMAC",
[LOOPBACK_XGMII] = "XGMII",
[LOOPBACK_XGXS] = "XGXS",
[LOOPBACK_XAUI] = "XAUI",
[LOOPBACK_GMII] = "GMII",
[LOOPBACK_SGMII] = "SGMII",
[LOOPBACK_XGBR] = "XGBR",
[LOOPBACK_XFI] = "XFI",
[LOOPBACK_XAUI_FAR] = "XAUI_FAR",
[LOOPBACK_GMII_FAR] = "GMII_FAR",
[LOOPBACK_SGMII_FAR] = "SGMII_FAR",
[LOOPBACK_XFI_FAR] = "XFI_FAR",
[LOOPBACK_GPHY] = "GPHY",
[LOOPBACK_PHYXS] = "PHYXS",
[LOOPBACK_PCS] = "PCS",
[LOOPBACK_PMAPMD] = "PMA/PMD",
[LOOPBACK_XPORT] = "XPORT",
[LOOPBACK_XGMII_WS] = "XGMII_WS",
[LOOPBACK_XAUI_WS] = "XAUI_WS",
[LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
[LOOPBACK_GMII_WS] = "GMII_WS",
[LOOPBACK_XFI_WS] = "XFI_WS",
[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
[LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
#define EFX_MAX_MTU (9 * 1024)
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
*/
static struct workqueue_struct *reset_workqueue;
/**************************************************************************
*
* Configurable values
*
*************************************************************************/
/*
* Use separate channels for TX and RX events
*
* Set this to 1 to use separate channels for TX and RX. It allows us
* to control interrupt affinity separately for TX and RX.
*
* This is only used in MSI-X interrupt mode
*/
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
"Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
* NAPI devices.
*/
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
* monitor. On Falcon-based NICs, this will:
* - Check the on-board hardware monitor;
* - Poll the link state and reconfigure the hardware as necessary.
*/
static unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the driver will initialise devices
* with invalid MAC addresses stored in the EEPROM or flash. If true,
* such devices will be initialised with a random locally-generated
* MAC address. This allows for loading the sfc_mtd driver to
* reprogram the flash, even if the flash contents (including the MAC
* address) have previously been erased.
*/
static unsigned int allow_bad_hwaddr;
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
* The default for RX should strike a balance between increasing the
* round-trip latency and reducing overhead.
*/
static unsigned int rx_irq_mod_usec = 60;
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
* This default is chosen to ensure that a 10G link does not go idle
* while a TX queue is stopped after it has become full. A queue is
* restarted when it drops below half full. The time this takes (assuming
* worst case 3 descriptors per packet and 1024 descriptors) is
* 512 / 3 * 1.2 = 205 usec.
*/
static unsigned int tx_irq_mod_usec = 150;
/* This is the first interrupt mode to try out of:
* 0 => MSI-X
* 1 => MSI
* 2 => legacy
*/
static unsigned int interrupt_mode;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
* i.e. the number of CPUs among which we may distribute simultaneous
* interrupt handling.
*
* Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
* The default (0) means to assign an interrupt to each package (level II cache)
*/
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
"Threshold score for reducing IRQ moderation");
static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
/**************************************************************************
*
* Utility functions and prototypes
*
*************************************************************************/
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_RUNNING) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
/**************************************************************************
*
* Event queue processing
*
*************************************************************************/
/* Process channel's event queue
*
* This function is responsible for processing the event queue of a
* single channel. The caller must guarantee that this function will
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
int spent;
if (unlikely(efx->reset_pending || !channel->enabled))
return 0;
spent = efx_nic_process_eventq(channel, budget);
if (spent == 0)
return 0;
/* Deliver last RX packet. */
if (channel->rx_pkt) {
__efx_rx_packet(channel, channel->rx_pkt,
channel->rx_pkt_csummed);
channel->rx_pkt = NULL;
}
efx_rx_strategy(channel);
efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
return spent;
}
/* Mark channel as finished processing
*
* Note that since we will not receive further interrupts for this
* channel before we finish processing and call the eventq_read_ack()
* method, there is no need to use the interrupt hold-off timers.
*/
static inline void efx_channel_processed(struct efx_channel *channel)
{
/* The interrupt handler for this channel may set work_pending
* as soon as we acknowledge the events we've seen. Make sure
* it's cleared before then. */
channel->work_pending = false;
smp_wmb();
efx_nic_eventq_read_ack(channel);
}
/* NAPI poll handler
*
* NAPI guarantees serialisation of polls of the same device, which
* provides the guarantee required by efx_process_channel().
*/
static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct efx_nic *efx = channel->efx;
int spent;
netif_vdbg(efx, intr, efx->net_dev,
"channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
spent = efx_process_channel(channel, budget);
if (spent < budget) {
if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
irq_adapt_low_thresh)) {
if (channel->irq_moderation > 1) {
channel->irq_moderation -= 1;
efx->type->push_irq_moderation(channel);
}
} else if (unlikely(channel->irq_mod_score >
irq_adapt_high_thresh)) {
if (channel->irq_moderation <
efx->irq_rx_moderation) {
channel->irq_moderation += 1;
efx->type->push_irq_moderation(channel);
}
}
channel->irq_count = 0;
channel->irq_mod_score = 0;
}
efx_filter_rfs_expire(channel);
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
* since efx_channel_processed() will have no effect if
* interrupts have already been disabled.
*/
napi_complete(napi);
efx_channel_processed(channel);
}
return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
*
* Disable hardware generated interrupts, wait for any existing
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
* This is for use only during a loopback self-test. It must not
* deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
if (channel->irq)
synchronize_irq(channel->irq);
/* Wait for any NAPI processing to complete */
napi_disable(&channel->napi_str);
/* Poll the channel */
efx_process_channel(channel, channel->eventq_mask + 1);
/* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
/* Create event queue
* Event queue memory allocations are done only once. If the channel
* is reset, the memory buffer will be reused; this guards against
* errors during channel reset and also simplifies interrupt handling.
*/
static int efx_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned long entries;
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"chan %d create event queue\n", channel->channel);
/* Build an event queue with room for one event per tx and rx buffer,
* plus some extra for link state events and MCDI completions. */
entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
return efx_nic_probe_eventq(channel);
}
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d init event queue\n", channel->channel);
channel->eventq_read_ptr = 0;
efx_nic_init_eventq(channel);
}
static void efx_fini_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d remove event queue\n", channel->channel);
efx_nic_remove_eventq(channel);
}
/**************************************************************************
*
* Channel handling
*
*************************************************************************/
/* Allocate and initialise a channel structure, optionally copying
* parameters (but not resources) from an old channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int j;
if (old_channel) {
channel = kmalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return NULL;
*channel = *old_channel;
channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j];
if (tx_queue->channel)
tx_queue->channel = channel;
tx_queue->buffer = NULL;
memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}
} else {
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return NULL;
channel->efx = efx;
channel->channel = i;
for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j];
tx_queue->efx = efx;
tx_queue->queue = i * EFX_TXQ_TYPES + j;
tx_queue->channel = channel;
}
}
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
(unsigned long)rx_queue);
return channel;
}
static int efx_probe_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc;
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"creating channel %d\n", channel->channel);
rc = efx_probe_eventq(channel);
if (rc)
goto fail1;
efx_for_each_channel_tx_queue(tx_queue, channel) {
rc = efx_probe_tx_queue(tx_queue);
if (rc)
goto fail2;
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
rc = efx_probe_rx_queue(rx_queue);
if (rc)
goto fail3;
}
channel->n_rx_frm_trunc = 0;
return 0;
fail3:
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
fail2:
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
fail1:
return rc;
}
static void efx_set_channel_names(struct efx_nic *efx)
{
struct efx_channel *channel;
const char *type = "";
int number;
efx_for_each_channel(channel, efx) {
number = channel->channel;
if (efx->n_channels > efx->n_rx_channels) {
if (channel->channel < efx->n_rx_channels) {
type = "-rx";
} else {
type = "-tx";
number -= efx->n_rx_channels;
}
}
snprintf(efx->channel_name[channel->channel],
sizeof(efx->channel_name[0]),
"%s%s-%d", efx->name, type, number);
}
}
static int efx_probe_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
/* Restart special buffer allocation */
efx->next_buffer_table = 0;
efx_for_each_channel(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create channel %d\n",
channel->channel);
goto fail;
}
}
efx_set_channel_names(efx);
return 0;
fail:
efx_remove_channels(efx);
return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
*/
static void efx_init_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
/* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
* alignment and overruns.
*/
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_hash_size +
efx->type->rx_buffer_padding);
efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state));
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"init chan %d\n", channel->channel);
efx_init_eventq(channel);
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
/* The rx buffer allocation strategy is MTU dependent */
efx_rx_strategy(channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_init_rx_queue(rx_queue);
WARN_ON(channel->rx_pkt != NULL);
efx_rx_strategy(channel);
}
}
/* This enables event queue processing and packet transmission.
*
* Note that this function is not allowed to fail, since that would
* introduce too much complexity into the suspend/resume path.
*/
static void efx_start_channel(struct efx_channel *channel)
{
struct efx_rx_queue *rx_queue;
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"starting chan %d\n", channel->channel);
/* The interrupt handler for this channel may set work_pending
* as soon as we enable it. Make sure it's cleared before
* then. Similarly, make sure it sees the enabled flag set. */
channel->work_pending = false;
channel->enabled = true;
smp_wmb();
/* Fill the queues before enabling NAPI */
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fast_push_rx_descriptors(rx_queue);
napi_enable(&channel->napi_str);
}
/* This disables event queue processing and packet transmission.
* This function does not guarantee that all queue processing
* (e.g. RX refill) is complete.
*/
static void efx_stop_channel(struct efx_channel *channel)
{
if (!channel->enabled)
return;
netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
"stop chan %d\n", channel->channel);
channel->enabled = false;
napi_disable(&channel->napi_str);
}
static void efx_fini_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
/* Schedule a reset to recover from the flush failure. The
* descriptor caches reference memory we're about to free,
* but falcon_reconfigure_mac_wrapper() won't reconnect
* the MACs because of the pending reset. */
netif_err(efx, drv, efx->net_dev,
"Resetting to recover from flush failure\n");
efx_schedule_reset(efx, RESET_TYPE_ALL);
} else if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
netif_dbg(efx, drv, efx->net_dev,
"successfully flushed all queues\n");
}
efx_for_each_channel(channel, efx) {
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"shut down chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
efx_fini_eventq(channel);
}
}
static void efx_remove_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
}
static void efx_remove_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
}
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i;
int rc;
efx_stop_all(efx);
efx_fini_channels(efx);
/* Clone channels */
memset(other_channel, 0, sizeof(other_channel));
for (i = 0; i < efx->n_channels; i++) {
channel = efx_alloc_channel(efx, i, efx->channel[i]);
if (!channel) {
rc = -ENOMEM;
goto out;
}
other_channel[i] = channel;
}
/* Swap entry counts and channel pointers */
old_rxq_entries = efx->rxq_entries;
old_txq_entries = efx->txq_entries;
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
efx->channel[i] = other_channel[i];
other_channel[i] = channel;
}
rc = efx_probe_channels(efx);
if (rc)
goto rollback;
efx_init_napi(efx);
/* Destroy old channels */
for (i = 0; i < efx->n_channels; i++) {
efx_fini_napi_channel(other_channel[i]);
efx_remove_channel(other_channel[i]);
}
out:
/* Free unused channel structures */
for (i = 0; i < efx->n_channels; i++)
kfree(other_channel[i]);
efx_init_channels(efx);
efx_start_all(efx);
return rc;
rollback:
/* Swap back */
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
efx->channel[i] = other_channel[i];
other_channel[i] = channel;
}
goto out;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
/**************************************************************************
*
* Port handling
*
**************************************************************************/
/* This ensures that the kernel is kept informed (via
* netif_carrier_on/off) of the link status, and also maintains the
* link status's stop on the port's TX queue.
*/
void efx_link_status_changed(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
* that no events are triggered between unregister_netdev() and the
* driver unloading. A more general condition is that NETDEV_CHANGE
* can only be generated between NETDEV_UP and NETDEV_DOWN */
if (!netif_running(efx->net_dev))
return;
if (link_state->up != netif_carrier_ok(efx->net_dev)) {
efx->n_link_state_changes++;
if (link_state->up)
netif_carrier_on(efx->net_dev);
else
netif_carrier_off(efx->net_dev);
}
/* Status message for kernel log */
if (link_state->up) {
netif_info(efx, link, efx->net_dev,
"link up at %uMbps %s-duplex (MTU %d)%s\n",
link_state->speed, link_state->fd ? "full" : "half",
efx->net_dev->mtu,
(efx->promiscuous ? " [PROMISC]" : ""));
} else {
netif_info(efx, link, efx->net_dev, "link down\n");
}
}
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
efx->link_advertising = advertising;
if (advertising) {
if (advertising & ADVERTISED_Pause)
efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
else
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
if (advertising & ADVERTISED_Asym_Pause)
efx->wanted_fc ^= EFX_FC_TX;
}
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
if (efx->link_advertising) {
if (wanted_fc & EFX_FC_RX)
efx->link_advertising |= (ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
else
efx->link_advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
efx->link_advertising ^= ADVERTISED_Asym_Pause;
}
}
static void efx_fini_port(struct efx_nic *efx);
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
* through efx_monitor().
*
* Callers must hold the mac_lock
*/
int __efx_reconfigure_port(struct efx_nic *efx)
{
enum efx_phy_mode phy_mode;
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
/* Serialise the promiscuous flag with efx_set_multicast_list. */
if (efx_dev_registered(efx)) {
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
}
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
efx->phy_mode |= PHY_MODE_TX_DISABLED;
else
efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
rc = efx->type->reconfigure_port(efx);
if (rc)
efx->phy_mode = phy_mode;
return rc;
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
* disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
mutex_lock(&efx->mac_lock);
rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
return rc;
}
/* Asynchronous work item for changing MAC promiscuity and multicast
* hash. Avoid a drain/rx_ingress enable by reconfiguring the current
* MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled) {
efx->type->push_multicast_hash(efx);
efx->mac_op->reconfigure(efx);
}
mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
unsigned char *perm_addr;
int rc;
netif_dbg(efx, probe, efx->net_dev, "create port\n");
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
return rc;
/* Sanity check MAC address */
perm_addr = efx->net_dev->perm_addr;
if (is_valid_ether_addr(perm_addr)) {
memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
} else {
netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
perm_addr);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
}
random_ether_addr(efx->net_dev->dev_addr);
netif_info(efx, probe, efx->net_dev,
"using locally-generated MAC %pM\n",
efx->net_dev->dev_addr);
}
return 0;
err:
efx->type->remove_port(efx);
return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
int rc;
netif_dbg(efx, drv, efx->net_dev, "init port\n");
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->init(efx);
if (rc)
goto fail1;
efx->port_initialized = true;
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
efx->mac_op->reconfigure(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
if (rc)
goto fail2;
mutex_unlock(&efx->mac_lock);
return 0;
fail2:
efx->phy_op->fini(efx);
fail1:
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_start_port(struct efx_nic *efx)
{
netif_dbg(efx, ifup, efx->net_dev, "start port\n");
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
/* efx_mac_work() might have been scheduled after efx_stop_port(),
* and then cancelled by efx_flush_all() */
efx->type->push_multicast_hash(efx);
efx->mac_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */
if (efx_dev_registered(efx)) {
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
}
}
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
if (!efx->port_initialized)
return;
efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_state.up = false;
efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
efx->type->remove_port(efx);
}
/**************************************************************************
*
* NIC handling
*
**************************************************************************/
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to enable PCI device\n");
goto fail1;
}
pci_set_master(pci_dev);
/* Set the PCI DMA mask. Try all possibilities from our
* genuine mask down to 32 bits, because some architectures
* (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
* masks event though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
if (pci_dma_supported(pci_dev, dma_mask) &&
((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
break;
dma_mask >>= 1;
}
if (rc) {
netif_err(efx, probe, efx->net_dev,
"could not find a suitable DMA mask\n");
goto fail2;
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
if (rc) {
/* pci_set_consistent_dma_mask() is not *allowed* to
* fail with a mask that pci_set_dma_mask() accepted,
* but just in case...
*/
netif_err(efx, probe, efx->net_dev,
"failed to set consistent DMA mask\n");
goto fail2;
}
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
rc = -EIO;
goto fail3;
}
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size, efx->membase);
return 0;
fail4:
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
efx->membase_phys = 0;
fail2:
pci_disable_device(efx->pci_dev);
fail1:
return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
efx->membase = NULL;
}
if (efx->membase_phys) {
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
efx->membase_phys = 0;
}
pci_disable_device(efx->pci_dev);
}
/* Get number of channels wanted. Each channel will have its own IRQ,
* 1 RX queue and/or 2 TX queues. */
static int efx_wanted_channels(void)
{
cpumask_var_t core_mask;
int count;
int cpu;
if (rss_cpus)
return rss_cpus;
if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
return 1;
}
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, core_mask)) {
++count;
cpumask_or(core_mask, core_mask,
topology_core_cpumask(cpu));
}
}
free_cpumask_var(core_mask);
return count;
}
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
int i, rc;
efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap)
return -ENOMEM;
for (i = 0; i < efx->n_rx_channels; i++) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
xentries[i].vector);
if (rc) {
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
return rc;
}
}
#endif
return 0;
}
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
int max_channels =
min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
int rc, i;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
int n_channels;
n_channels = efx_wanted_channels();
if (separate_tx_channels)
n_channels *= 2;
n_channels = min(n_channels, max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %d).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
n_channels);
}
if (rc == 0) {
efx->n_channels = n_channels;
if (separate_tx_channels) {
efx->n_tx_channels =
max(efx->n_channels / 2, 1U);
efx->n_rx_channels =
max(efx->n_channels -
efx->n_tx_channels, 1U);
} else {
efx->n_tx_channels = efx->n_channels;
efx->n_rx_channels = efx->n_channels;
}
rc = efx_init_rx_cpu_rmap(efx, xentries);
if (rc) {
pci_disable_msix(efx->pci_dev);
return rc;
}
for (i = 0; i < n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
netif_err(efx, drv, efx->net_dev,
"could not enable MSI-X\n");
}
}
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
} else {
netif_err(efx, drv, efx->net_dev,
"could not enable MSI\n");
efx->interrupt_mode = EFX_INT_MODE_LEGACY;
}
}
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
return 0;
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
/* Remove MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
channel->irq = 0;
pci_disable_msi(efx->pci_dev);
pci_disable_msix(efx->pci_dev);
/* Remove legacy interrupt */
efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
/* We need to adjust the TX queue numbers if we have separate
* RX-only and TX-only channels.
*/
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
}
}
static int efx_probe_nic(struct efx_nic *efx)
{
size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
/* Carry out hardware-type specific initialisation */
rc = efx->type->probe(efx);
if (rc)
return rc;
/* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
rc = efx_probe_interrupts(efx);
if (rc)
goto fail;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] = i % efx->n_rx_channels;
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
true);
return 0;
fail:
efx->type->remove(efx);
return rc;
}
static void efx_remove_nic(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
efx_remove_interrupts(efx);
efx->type->remove(efx);
}
/**************************************************************************
*
* NIC startup/shutdown
*
*************************************************************************/
static int efx_probe_all(struct efx_nic *efx)
{
int rc;
rc = efx_probe_nic(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
rc = efx_probe_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_channels(efx);
if (rc)
goto fail3;
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
goto fail4;
}
return 0;
fail4:
efx_remove_channels(efx);
fail3:
efx_remove_port(efx);
fail2:
efx_remove_nic(efx);
fail1:
return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
* port, kernel transmit queue, NAPI processing and hardware interrupts,
* and ensures that the port is scheduled to be reconfigured.
* This function is safe to call multiple times when the NIC is in any
* state. */
static void efx_start_all(struct efx_nic *efx)
{
struct efx_channel *channel;
EFX_ASSERT_RESET_SERIALISED(efx);
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
if (efx->port_enabled)
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
return;
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
efx_start_channel(channel);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
/* Switch to event based MCDI completions after enabling interrupts.
* If a reset has been scheduled, then we need to stay in polled mode.
* Rather than serialising efx_mcdi_mode_event() [which sleeps] and
* reset_pending [modified from an atomic context], we instead guarantee
* that efx_mcdi_mode_poll() isn't reverted erroneously */
efx_mcdi_mode_event(efx);
if (efx->reset_pending)
efx_mcdi_mode_poll(efx);
/* Start the hardware monitor if there is one. Otherwise (we're link
* event driven), we have to poll the PHY because after an event queue
* flush, we could have a missed a link state change */
if (efx->type->monitor != NULL) {
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
} else {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
}
efx->type->start_stats(efx);
}
/* Flush all delayed work. Should only be called when no more delayed work
* will be scheduled. This doesn't flush pending online resets (efx_reset),
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
/* Make sure the hardware monitor is stopped */
cancel_delayed_work_sync(&efx->monitor_work);
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
/* Quiesce hardware and software without bringing the link down.
* Safe to call multiple times, when the nic and interface is in any
* state. The caller is guaranteed to subsequently be in a position
* to modify any hardware and software state they see fit without
* taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
struct efx_channel *channel;
EFX_ASSERT_RESET_SERIALISED(efx);
/* port_enabled can be read safely under the rtnl lock */
if (!efx->port_enabled)
return;
efx->type->stop_stats(efx);
/* Switch to MCDI polling on Siena before disabling interrupts */
efx_mcdi_mode_poll(efx);
/* Disable interrupts and wait for ISR to complete */
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
}
/* Stop all NAPI processing and synchronous rx refills */
efx_for_each_channel(channel, efx)
efx_stop_channel(channel);
/* Stop all asynchronous port reconfigurations. Since all
* event processing has already been stopped, there is no
* window to loose phy events */
efx_stop_port(efx);
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
efx_flush_all(efx);
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
netif_tx_stop_all_queues(efx->net_dev);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
}
static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_filters(efx);
efx_remove_channels(efx);
efx_remove_port(efx);
efx_remove_nic(efx);
}
/**************************************************************************
*
* Interrupt moderation
*
**************************************************************************/
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
{
if (usecs == 0)
return 0;
if (usecs < resolution)
return 1; /* never round down to 0 */
return usecs / resolution;
}
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx)
{
struct efx_channel *channel;
unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
EFX_ASSERT_RESET_SERIALISED(efx);
if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
return -EINVAL;
if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
!rx_may_override_tx) {
netif_err(efx, drv, efx->net_dev, "Channels are shared. "
"RX and TX IRQ moderation must be equal\n");
return -EINVAL;
}
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel))
channel->irq_moderation = rx_ticks;
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
return 0;
}
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive)
{
*rx_adaptive = efx->irq_rx_adaptive;
*rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
/* If channels are shared between RX and TX, so is IRQ
* moderation. Otherwise, IRQ moderation is the same for all
* TX channels and is not adaptive.
*/
if (efx->tx_channel_offset == 0)
*tx_usecs = *rx_usecs;
else
*tx_usecs =
efx->channel[efx->tx_channel_offset]->irq_moderation *
EFX_IRQ_MOD_RESOLUTION;
}
/**************************************************************************
*
* Hardware monitor
*
**************************************************************************/
/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work.work);
netif_vdbg(efx, timer, efx->net_dev,
"hardware monitor executing on CPU %d\n",
raw_smp_processor_id());
BUG_ON(efx->type->monitor == NULL);
/* If the mac_lock is already held then it is likely a port
* reconfiguration is already in place, which will likely do
* most of the work of monitor() anyway. */
if (mutex_trylock(&efx->mac_lock)) {
if (efx->port_enabled)
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
}
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
}
/**************************************************************************
*
* ioctls
*
*************************************************************************/
/* Net device ioctl
* Context: process, rtnl_lock() held.
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
*
* NAPI interface
*
**************************************************************************/
static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx) {
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
}
static void efx_fini_napi_channel(struct efx_channel *channel)
{
if (channel->napi_dev)
netif_napi_del(&channel->napi_str);
channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
/**************************************************************************
*
* Kernel netpoll interface
*
*************************************************************************/
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Although in the common case interrupts will be disabled, this is not
* guaranteed. However, all our work happens inside the NAPI callback,
* so no locking is required.
*/
static void efx_netpoll(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_schedule_channel(channel);
}
#endif
/**************************************************************************
*
* Kernel net device interface
*
*************************************************************************/
/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
raw_smp_processor_id());
if (efx->state == STATE_DISABLED)
return -EIO;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
return -EIO;
/* Notify the kernel of the link state polled during driver load,
* before the monitor starts running */
efx_link_status_changed(efx);
efx_start_all(efx);
return 0;
}
/* Context: process, rtnl_lock() held.
* Note that the kernel will ignore our return code; this method
* should really be a void.
*/
static int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
if (efx->state != STATE_DISABLED) {
/* Stop the device and flush all the channels */
efx_stop_all(efx);
efx_fini_channels(efx);
efx_init_channels(efx);
}
return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
spin_unlock_bh(&efx->stats_lock);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
stats->rx_length_errors = (mac_stats->rx_gtjumbo +
mac_stats->rx_length_error);
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
stats->rx_fifo_errors = mac_stats->rx_overflow;
stats->rx_missed_errors = mac_stats->rx_missed;
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc = 0;
EFX_ASSERT_RESET_SERIALISED(efx);
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
efx_stop_all(efx);
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_fini_channels(efx);
mutex_lock(&efx->mac_lock);
/* Reconfigure the MAC before enabling the dma queues so that
* the RX buffers don't overflow */
net_dev->mtu = new_mtu;
efx->mac_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_init_channels(efx);
efx_start_all(efx);
return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n",
new_addr);
return -EINVAL;
}
memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
efx->mac_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
/* Build multicast hash table */
if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
* so we always add bit 0xff to the mask.
*/
set_bit_le(0xff, mc_hash->byte);
}
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
/* Otherwise efx_start_port() will do this */
}
static int efx_set_features(struct net_device *net_dev, u32 data)
{
struct efx_nic *efx = netdev_priv(net_dev);
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_multicast_list,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
efx_mtd_rename(efx);
efx_set_channel_names(efx);
}
static int efx_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *net_dev = ptr;
if (net_dev->netdev_ops == &efx_netdev_ops &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
return NOTIFY_DONE;
}
static struct notifier_block efx_netdev_notifier = {
.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
struct efx_channel *channel;
int rc;
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
/* Clear MAC statistics */
efx->mac_op->update_stats(efx);
memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
rtnl_lock();
rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0)
goto fail_locked;
efx_update_name(efx);
rc = register_netdevice(net_dev);
if (rc)
goto fail_locked;
efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue_core_txq(tx_queue);
}
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(efx->net_dev);
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to init net dev attributes\n");
goto fail_registered;
}
return 0;
fail_locked:
rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
fail_registered:
unregister_netdev(net_dev);
return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev as running their destructors
* may be needed to get the device ref. count to 0. */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_release_tx_buffers(tx_queue);
}
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
}
/**************************************************************************
*
* Device reset and suspend
*
**************************************************************************/
/* Tears down the entire software state and most of the hardware state
* before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
efx_fini_channels(efx);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
/* This function will always ensure that the locks acquired in
* efx_reset_down() are released. A failure return code indicates
* that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
goto fail;
}
if (!ok)
goto fail;
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
if (efx->phy_op->reconfigure(efx))
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
efx->mac_op->reconfigure(efx);
efx_init_channels(efx);
efx_restore_filters(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
return 0;
fail:
efx->port_initialized = false;
mutex_unlock(&efx->mac_lock);
return rc;
}
/* Reset the NIC using the specified method. Note that the reset may
* fail, in which case the card will be left in an unusable state.
*
* Caller must hold the rtnl_lock.
*/
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
int rc, rc2;
bool disabled;
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
netif_device_detach(efx->net_dev);
efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
goto out;
}
/* Clear flags for the scopes we covered. We assume the NIC and
* driver are now quiescent so that there is no race here.
*/
efx->reset_pending &= -(1 << (method + 1));
/* Reinitialise bus-mastering, which may have been turned off before
* the reset was scheduled. This is still appropriate, even in the
* RESET_TYPE_DISABLE since this driver generally assumes the hardware
* can respond to requests. */
pci_set_master(efx->pci_dev);
out:
/* Leave device stopped if necessary */
disabled = rc || method == RESET_TYPE_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
rc = rc2;
}
if (disabled) {
dev_close(efx->net_dev);
netif_err(efx, drv, efx->net_dev, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
netif_device_attach(efx->net_dev);
}
return rc;
}
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
unsigned long pending = ACCESS_ONCE(efx->reset_pending);
if (!pending)
return;
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flags set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
netif_info(efx, drv, efx->net_dev,
"scheduled reset quenched. NIC not RUNNING\n");
return;
}
rtnl_lock();
(void)efx_reset(efx, fls(pending) - 1);
rtnl_unlock();
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
break;
default:
method = efx->type->map_reset_reason(type);
netif_dbg(efx, drv, efx->net_dev,
"scheduling %s reset for %s\n",
RESET_TYPE(method), RESET_TYPE(type));
break;
}
set_bit(method, &efx->reset_pending);
/* efx_process_channel() will no longer read events once a
* reset is scheduled. So switch back to poll'd MCDI completions. */
efx_mcdi_mode_poll(efx);
queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
*
* List of NICs we support
*
**************************************************************************/
/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
/**************************************************************************
*
* Dummy PHY/MAC operations
*
* Can be used for some unimplemented operations
* Needed so all function pointers are valid and do not have to be tested
* before use
*
**************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
return false;
}
static const struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
.reconfigure = efx_port_dummy_op_int,
.poll = efx_port_dummy_op_poll,
.fini = efx_port_dummy_op_void,
};
/**************************************************************************
*
* Data housekeeping
*
**************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
/* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
#endif
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
}
efx->type = type;
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
/* Would be good to use the net_dev name, but we're too early */
snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
pci_name(pci_dev));
efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
if (!efx->workqueue)
goto fail;
return 0;
fail:
efx_fini_struct(efx);
return -ENOMEM;
}
static void efx_fini_struct(struct efx_nic *efx)
{
int i;
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
}
}
/**************************************************************************
*
* PCI interface
*
**************************************************************************/
/* Main body of final NIC shutdown code
* This is called only at module unload (or hotplug removal).
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
efx_nic_fini_interrupt(efx);
efx_fini_channels(efx);
efx_fini_port(efx);
efx->type->fini(efx);
efx_fini_napi(efx);
efx_remove_all(efx);
}
/* Final NIC shutdown
* This is called only at module unload (or hotplug removal).
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
efx->state = STATE_FINI;
dev_close(efx->net_dev);
/* Allow any queued efx_resets() to complete */
rtnl_unlock();
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
/* Wait for any scheduled resets to complete. No more will be
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net_device's have been removed. */
cancel_work_sync(&efx->reset_work);
efx_pci_remove_main(efx);
efx_fini_io(efx);
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
pci_set_drvdata(pci_dev, NULL);
efx_fini_struct(efx);
free_netdev(efx->net_dev);
};
/* Main body of NIC initialisation
* This is called at module load (or hotplug insertion, theoretically).
*/
static int efx_pci_probe_main(struct efx_nic *efx)
{
int rc;
/* Do start-of-day initialisation */
rc = efx_probe_all(efx);
if (rc)
goto fail1;
efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise NIC\n");
goto fail3;
}
rc = efx_init_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise port\n");
goto fail4;
}
efx_init_channels(efx);
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
return 0;
fail5:
efx_fini_channels(efx);
efx_fini_port(efx);
fail4:
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
efx_remove_all(efx);
fail1:
return rc;
}
/* NIC initialisation
*
* This is called at module load (or hotplug insertion,
* theoretically). It sets up PCI mappings, tests and resets the NIC,
* sets up and registers the network devices with the kernel and hooks
* the interrupt service routine. It does not prepare the device for
* transmission; this is left to the first time one of the network
* interfaces is brought up (i.e. efx_net_open).
*/
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
/* All offloads can be toggled */
net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
if (rc)
goto fail1;
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
if (rc)
goto fail2;
/* No serialisation is required with the reset path because
* we're in STATE_INIT. */
for (i = 0; i < 5; i++) {
rc = efx_pci_probe_main(efx);
/* Serialise against efx_reset(). No more resets will be
* scheduled since efx_stop_all() has been called, and we
* have not and never have been registered with either
* the rtnetlink or driverlink layers. */
cancel_work_sync(&efx->reset_work);
if (rc == 0) {
if (efx->reset_pending) {
/* If there was a scheduled reset during
* probe, the NIC is probably hosed anyway */
efx_pci_remove_main(efx);
rc = -EIO;
} else {
break;
}
}
/* Retry if a recoverably reset event has been scheduled */
if (efx->reset_pending &
~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
!efx->reset_pending)
goto fail3;
efx->reset_pending = 0;
}
if (rc) {
netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
goto fail4;
}
/* Switch to the running state before we expose the device to the OS,
* so that dev_open()|efx_start_all() will actually start the device */
efx->state = STATE_RUNNING;
rc = efx_register_netdev(efx);
if (rc)
goto fail5;
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
rtnl_lock();
efx_mtd_probe(efx); /* allowed to fail */
rtnl_unlock();
return 0;
fail5:
efx_pci_remove_main(efx);
fail4:
fail3:
efx_fini_io(efx);
fail2:
efx_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
}
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_FINI;
netif_device_detach(efx->net_dev);
efx_stop_all(efx);
efx_fini_channels(efx);
return 0;
}
static int efx_pm_thaw(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_INIT;
efx_init_channels(efx);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
efx->state = STATE_RUNNING;
efx->type->resume_wol(efx);
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
queue_work(reset_workqueue, &efx->reset_work);
return 0;
}
static int efx_pm_poweroff(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct efx_nic *efx = pci_get_drvdata(pci_dev);
efx->type->fini(efx);
efx->reset_pending = 0;
pci_save_state(pci_dev);
return pci_set_power_state(pci_dev, PCI_D3hot);
}
/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct efx_nic *efx = pci_get_drvdata(pci_dev);
int rc;
rc = pci_set_power_state(pci_dev, PCI_D0);
if (rc)
return rc;
pci_restore_state(pci_dev);
rc = pci_enable_device(pci_dev);
if (rc)
return rc;
pci_set_master(efx->pci_dev);
rc = efx->type->reset(efx, RESET_TYPE_ALL);
if (rc)
return rc;
rc = efx->type->init(efx);
if (rc)
return rc;
efx_pm_thaw(dev);
return 0;
}
static int efx_pm_suspend(struct device *dev)
{
int rc;
efx_pm_freeze(dev);
rc = efx_pm_poweroff(dev);
if (rc)
efx_pm_resume(dev);
return rc;
}
static struct dev_pm_ops efx_pm_ops = {
.suspend = efx_pm_suspend,
.resume = efx_pm_resume,
.freeze = efx_pm_freeze,
.thaw = efx_pm_thaw,
.poweroff = efx_pm_poweroff,
.restore = efx_pm_resume,
};
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
};
/**************************************************************************
*
* Kernel module interface
*
*************************************************************************/
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
"Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
int rc;
printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
rc = register_netdevice_notifier(&efx_netdev_notifier);
if (rc)
goto err_notifier;
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
goto err_reset;
}
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
goto err_pci;
return 0;
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
}
static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3689_0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.